mirror of https://github.com/citusdata/citus.git
Normalize tests: Remove trailing whitespace
parent
6353c9907f
commit
7730bd449c
|
@ -63,9 +63,9 @@ s/"(ref_table_[0-9]_|ref_table_[0-9]_value_fkey_)[0-9]+"/"\1xxxxxxx"/g
|
|||
# Line info varies between versions
|
||||
/^LINE [0-9]+:.*$/d
|
||||
/^ *\^$/d
|
||||
#
|
||||
## Remove trailing whitespace
|
||||
#s/ *$//g
|
||||
|
||||
# Remove trailing whitespace
|
||||
s/ *$//g
|
||||
#
|
||||
## pg12 changes
|
||||
#s/Partitioned table "/Table "/g
|
||||
|
|
|
@ -5,9 +5,9 @@ SET citus.shard_count TO 4;
|
|||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.next_shard_id TO 801009000;
|
||||
SELECT create_distributed_table('test','x');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test VALUES (1,2);
|
||||
|
@ -18,7 +18,7 @@ SET citus.max_adaptive_executor_pool_size TO 2;
|
|||
SET citus.task_executor_type TO 'adaptive';
|
||||
BEGIN;
|
||||
SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x);
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
@ -27,7 +27,7 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$
|
|||
SELECT count(*) FROM pg_stat_activity
|
||||
WHERE pid <> pg_backend_pid() AND query LIKE '%8010090%'
|
||||
$$);
|
||||
sum
|
||||
sum
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
@ -37,7 +37,7 @@ END;
|
|||
SET citus.executor_slow_start_interval TO '10ms';
|
||||
BEGIN;
|
||||
SELECT count(*) FROM test a JOIN (SELECT x, pg_sleep(0.1) FROM test) b USING (x);
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
@ -46,7 +46,7 @@ SELECT sum(result::bigint) FROM run_command_on_workers($$
|
|||
SELECT count(*) FROM pg_stat_activity
|
||||
WHERE pid <> pg_backend_pid() AND query LIKE '%8010090%'
|
||||
$$);
|
||||
sum
|
||||
sum
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
|
|
@ -5,47 +5,47 @@ SET citus.shard_replication_factor to 1;
|
|||
SET citus.enable_repartition_joins TO true;
|
||||
CREATE TABLE ab(a int, b int);
|
||||
SELECT create_distributed_table('ab', 'a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO ab SELECT *,* FROM generate_series(1,10);
|
||||
SELECT COUNT(*) FROM ab k, ab l
|
||||
WHERE k.a = l.b;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
SELECT COUNT(*) FROM ab k, ab l, ab m, ab t
|
||||
WHERE k.a = l.b AND k.a = m.b AND t.b = l.a;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM (SELECT k.a FROM ab k, ab l WHERE k.a = l.b) first, (SELECT * FROM ab) second WHERE first.a = second.b;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
@ -62,21 +62,21 @@ CREATE TABLE single_hash_repartition_first (id int, sum int, avg float);
|
|||
CREATE TABLE single_hash_repartition_second (id int, sum int, avg float);
|
||||
CREATE TABLE ref_table (id int, sum int, avg float);
|
||||
SELECT create_distributed_table('single_hash_repartition_first', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('single_hash_repartition_second', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- single hash repartition after bcast joins
|
||||
|
@ -86,7 +86,7 @@ FROM
|
|||
ref_table r1, single_hash_repartition_second t1, single_hash_repartition_first t2
|
||||
WHERE
|
||||
r1.id = t1.id AND t2.sum = t1.id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Aggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
-> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
|
||||
|
@ -104,7 +104,7 @@ FROM
|
|||
single_hash_repartition_first t1, single_hash_repartition_first t2, single_hash_repartition_second t3
|
||||
WHERE
|
||||
t1.id = t2.id AND t1.sum = t3.id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Aggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
-> Custom Scan (Citus Adaptive) (cost=0.00..0.00 rows=0 width=0)
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
|
||||
-- adding the same node again should return the existing nodeid
|
||||
SELECT master_add_node('localhost', :master_port, groupid => 0) = :master_nodeid;
|
||||
?column?
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -15,8 +15,8 @@ ERROR: group 0 already has a primary node
|
|||
-- start_metadata_sync_to_node() for coordinator should raise a notice
|
||||
SELECT start_metadata_sync_to_node('localhost', :master_port);
|
||||
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
|
||||
start_metadata_sync_to_node
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
|
|
|
@ -39,47 +39,47 @@ create aggregate sum2_strict (int) (
|
|||
combinefunc = sum2_sfunc_strict
|
||||
);
|
||||
select create_distributed_function('sum2(int)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
select create_distributed_function('sum2_strict(int)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
create table aggdata (id int, key int, val int, valf float8);
|
||||
select create_distributed_table('aggdata', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
insert into aggdata (id, key, val, valf) values (1, 1, 2, 11.2), (2, 1, NULL, 2.1), (3, 2, 2, 3.22), (4, 2, 3, 4.23), (5, 2, 5, 5.25), (6, 3, 4, 63.4), (7, 5, NULL, 75), (8, 6, NULL, NULL), (9, 6, NULL, 96), (10, 7, 8, 1078), (11, 9, 0, 1.19);
|
||||
select key, sum2(val), sum2_strict(val), stddev(valf) from aggdata group by key order by key;
|
||||
key | sum2 | sum2_strict | stddev
|
||||
key | sum2 | sum2_strict | stddev
|
||||
---------------------------------------------------------------------
|
||||
1 | | 4 | 6.43467170879758
|
||||
2 | 20 | 20 | 1.01500410508201
|
||||
3 | 8 | 8 |
|
||||
5 | | |
|
||||
6 | | |
|
||||
7 | 16 | 16 |
|
||||
9 | 0 | 0 |
|
||||
3 | 8 | 8 |
|
||||
5 | | |
|
||||
6 | | |
|
||||
7 | 16 | 16 |
|
||||
9 | 0 | 0 |
|
||||
(7 rows)
|
||||
|
||||
-- FILTER supported
|
||||
select key, sum2(val) filter (where valf < 5), sum2_strict(val) filter (where valf < 5) from aggdata group by key order by key;
|
||||
key | sum2 | sum2_strict
|
||||
key | sum2 | sum2_strict
|
||||
---------------------------------------------------------------------
|
||||
1 | |
|
||||
1 | |
|
||||
2 | 10 | 10
|
||||
3 | 0 |
|
||||
5 | 0 |
|
||||
6 | 0 |
|
||||
7 | 0 |
|
||||
3 | 0 |
|
||||
5 | 0 |
|
||||
6 | 0 |
|
||||
7 | 0 |
|
||||
9 | 0 | 0
|
||||
(7 rows)
|
||||
|
||||
|
@ -88,17 +88,17 @@ select key, sum2(distinct val), sum2_strict(distinct val) from aggdata group by
|
|||
ERROR: cannot compute aggregate (distinct)
|
||||
DETAIL: table partitioning is unsuitable for aggregate (distinct)
|
||||
select id, sum2(distinct val), sum2_strict(distinct val) from aggdata group by id order by id;
|
||||
id | sum2 | sum2_strict
|
||||
id | sum2 | sum2_strict
|
||||
---------------------------------------------------------------------
|
||||
1 | 4 | 4
|
||||
2 | |
|
||||
2 | |
|
||||
3 | 4 | 4
|
||||
4 | 6 | 6
|
||||
5 | 10 | 10
|
||||
6 | 8 | 8
|
||||
7 | |
|
||||
8 | |
|
||||
9 | |
|
||||
7 | |
|
||||
8 | |
|
||||
9 | |
|
||||
10 | 16 | 16
|
||||
11 | 0 | 0
|
||||
(11 rows)
|
||||
|
@ -108,9 +108,9 @@ select key, sum2(val order by valf), sum2_strict(val order by valf) from aggdata
|
|||
ERROR: unsupported aggregate function sum2
|
||||
-- Test handling a lack of intermediate results
|
||||
select sum2(val), sum2_strict(val) from aggdata where valf = 0;
|
||||
sum2 | sum2_strict
|
||||
sum2 | sum2_strict
|
||||
---------------------------------------------------------------------
|
||||
0 |
|
||||
0 |
|
||||
(1 row)
|
||||
|
||||
-- test polymorphic aggregates from https://github.com/citusdata/citus/issues/2397
|
||||
|
@ -136,15 +136,15 @@ CREATE AGGREGATE last (
|
|||
combinefunc = last_agg
|
||||
);
|
||||
SELECT create_distributed_function('first(anyelement)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('last(anyelement)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT key, first(val ORDER BY id), last(val ORDER BY id)
|
||||
|
@ -153,17 +153,17 @@ ERROR: unsupported aggregate function first
|
|||
-- However, GROUP BY on distribution column gets pushed down
|
||||
SELECT id, first(val ORDER BY key), last(val ORDER BY key)
|
||||
FROM aggdata GROUP BY id ORDER BY id;
|
||||
id | first | last
|
||||
id | first | last
|
||||
---------------------------------------------------------------------
|
||||
1 | 2 | 2
|
||||
2 | |
|
||||
2 | |
|
||||
3 | 2 | 2
|
||||
4 | 3 | 3
|
||||
5 | 5 | 5
|
||||
6 | 4 | 4
|
||||
7 | |
|
||||
8 | |
|
||||
9 | |
|
||||
7 | |
|
||||
8 | |
|
||||
9 | |
|
||||
10 | 8 | 8
|
||||
11 | 0 | 0
|
||||
(11 rows)
|
||||
|
@ -189,13 +189,13 @@ select sumstring(valf::text) from aggdata where valf is not null;
|
|||
ERROR: function "aggregate_support.sumstring(text)" does not exist
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
select create_distributed_function('sumstring(text)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
select sumstring(valf::text) from aggdata where valf is not null;
|
||||
sumstring
|
||||
sumstring
|
||||
---------------------------------------------------------------------
|
||||
1339.59
|
||||
(1 row)
|
||||
|
@ -213,13 +213,13 @@ create aggregate array_collect_sort(el int) (
|
|||
initcond = '{}'
|
||||
);
|
||||
select create_distributed_function('array_collect_sort(int)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
select array_collect_sort(val) from aggdata;
|
||||
array_collect_sort
|
||||
array_collect_sort
|
||||
---------------------------------------------------------------------
|
||||
{0,2,2,3,4,5,8,NULL,NULL,NULL,NULL}
|
||||
(1 row)
|
||||
|
@ -229,7 +229,7 @@ create user notsuper;
|
|||
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
|
||||
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
|
||||
select run_command_on_workers($$create user notsuper$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"CREATE ROLE")
|
||||
(localhost,57638,t,"CREATE ROLE")
|
||||
|
@ -241,7 +241,7 @@ select run_command_on_workers($$
|
|||
grant all on schema aggregate_support to notsuper;
|
||||
grant all on all tables in schema aggregate_support to notsuper;
|
||||
$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,GRANT)
|
||||
(localhost,57638,t,GRANT)
|
||||
|
@ -249,7 +249,7 @@ $$);
|
|||
|
||||
set role notsuper;
|
||||
select array_collect_sort(val) from aggdata;
|
||||
array_collect_sort
|
||||
array_collect_sort
|
||||
---------------------------------------------------------------------
|
||||
{0,2,2,3,4,5,8,NULL,NULL,NULL,NULL}
|
||||
(1 row)
|
||||
|
|
|
@ -4,7 +4,7 @@ CREATE ROLE alter_role_1 WITH LOGIN;
|
|||
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
|
||||
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
|
||||
SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"CREATE ROLE")
|
||||
(localhost,57638,t,"CREATE ROLE")
|
||||
|
@ -14,30 +14,30 @@ SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$);
|
|||
ALTER ROLE alter_role_1 WITH SUPERUSER NOSUPERUSER;
|
||||
ERROR: conflicting or redundant options
|
||||
-- make sure that we propagate all options accurately
|
||||
ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05';
|
||||
ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05';
|
||||
SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
row
|
||||
row
|
||||
---------------------------------------------------------------------
|
||||
(alter_role_1,t,t,t,t,t,t,t,66,,2032)
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
|
||||
(localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
|
||||
(2 rows)
|
||||
|
||||
-- make sure that we propagate all options accurately
|
||||
ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05';
|
||||
ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05';
|
||||
SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
row
|
||||
row
|
||||
---------------------------------------------------------------------
|
||||
(alter_role_1,f,f,f,f,f,f,f,0,,2052)
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
|
||||
(localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
|
||||
|
@ -46,18 +46,18 @@ SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcr
|
|||
-- make sure that non-existent users are handled properly
|
||||
ALTER ROLE alter_role_2 WITH SUPERUSER NOSUPERUSER;
|
||||
ERROR: conflicting or redundant options
|
||||
ALTER ROLE alter_role_2 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05';
|
||||
ALTER ROLE alter_role_2 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05';
|
||||
ERROR: role "alter_role_2" does not exist
|
||||
-- make sure that CURRENT_USER just works fine
|
||||
ALTER ROLE CURRENT_USER WITH CONNECTION LIMIT 123;
|
||||
SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;
|
||||
rolconnlimit
|
||||
rolconnlimit
|
||||
---------------------------------------------------------------------
|
||||
123
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,123)
|
||||
(localhost,57638,t,123)
|
||||
|
@ -66,13 +66,13 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname
|
|||
-- make sure that SESSION_USER just works fine
|
||||
ALTER ROLE SESSION_USER WITH CONNECTION LIMIT 124;
|
||||
SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;
|
||||
rolconnlimit
|
||||
rolconnlimit
|
||||
---------------------------------------------------------------------
|
||||
124
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,124)
|
||||
(localhost,57638,t,124)
|
||||
|
@ -81,13 +81,13 @@ SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname
|
|||
-- now lets test the passwords in more detail
|
||||
ALTER ROLE alter_role_1 WITH PASSWORD NULL;
|
||||
SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
?column?
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,t)
|
||||
(localhost,57638,t,t)
|
||||
|
@ -95,13 +95,13 @@ SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE
|
|||
|
||||
ALTER ROLE alter_role_1 WITH PASSWORD 'test1';
|
||||
SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
rolpassword
|
||||
rolpassword
|
||||
---------------------------------------------------------------------
|
||||
md52f9cc8d65e37edcc45c4a489bdfc699d
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
|
||||
(localhost,57638,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
|
||||
|
@ -109,13 +109,13 @@ SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname
|
|||
|
||||
ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'test2';
|
||||
SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
rolpassword
|
||||
rolpassword
|
||||
---------------------------------------------------------------------
|
||||
md5e17f7818c5ec023fa87bdb97fd3e842e
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
|
||||
(localhost,57638,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
|
||||
|
@ -123,13 +123,13 @@ SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname
|
|||
|
||||
ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'md59cce240038b7b335c6aa9674a6f13e72';
|
||||
SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
rolpassword
|
||||
rolpassword
|
||||
---------------------------------------------------------------------
|
||||
md59cce240038b7b335c6aa9674a6f13e72
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,md59cce240038b7b335c6aa9674a6f13e72)
|
||||
(localhost,57638,t,md59cce240038b7b335c6aa9674a6f13e72)
|
||||
|
@ -140,7 +140,7 @@ CREATE ROLE "alter_role'1" WITH LOGIN;
|
|||
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
|
||||
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
|
||||
SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"CREATE ROLE")
|
||||
(localhost,57638,t,"CREATE ROLE")
|
||||
|
@ -148,13 +148,13 @@ SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$);
|
|||
|
||||
ALTER ROLE "alter_role'1" CREATEROLE;
|
||||
SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1';
|
||||
rolcreaterole
|
||||
rolcreaterole
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,t)
|
||||
(localhost,57638,t,t)
|
||||
|
@ -164,7 +164,7 @@ CREATE ROLE "alter_role""1" WITH LOGIN;
|
|||
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
|
||||
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
|
||||
SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"CREATE ROLE")
|
||||
(localhost,57638,t,"CREATE ROLE")
|
||||
|
@ -172,66 +172,66 @@ SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$);
|
|||
|
||||
ALTER ROLE "alter_role""1" CREATEROLE;
|
||||
SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1';
|
||||
rolcreaterole
|
||||
rolcreaterole
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,t)
|
||||
(localhost,57638,t,t)
|
||||
(2 rows)
|
||||
|
||||
-- add node
|
||||
ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3';
|
||||
ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3';
|
||||
SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
row
|
||||
row
|
||||
---------------------------------------------------------------------
|
||||
(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
|
||||
(localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
|
||||
(2 rows)
|
||||
|
||||
SELECT master_remove_node('localhost', :worker_1_port);
|
||||
master_remove_node
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 'test4';
|
||||
ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 'test4';
|
||||
SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
row
|
||||
row
|
||||
---------------------------------------------------------------------
|
||||
(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
|
||||
(1 row)
|
||||
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
|
||||
?column?
|
||||
?column?
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
|
||||
row
|
||||
row
|
||||
---------------------------------------------------------------------
|
||||
(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
|
||||
(localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
|
||||
|
|
|
@ -2,14 +2,14 @@
|
|||
-- Setup MX data syncing
|
||||
--
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
start_metadata_sync_to_node
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
start_metadata_sync_to_node
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
|
|
|
@ -3,21 +3,21 @@ CREATE SCHEMA bool_agg;
|
|||
SET search_path TO bool_agg;
|
||||
CREATE TABLE bool_test (id int, val int, flag bool, kind int);
|
||||
SELECT create_distributed_table('bool_agg.bool_test','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO bool_test VALUES (1, 1, true, 99), (2, 2, false, 99), (2, 3, true, 88);
|
||||
-- mix of true and false
|
||||
SELECT bool_and(flag), bool_or(flag), every(flag) FROM bool_test;
|
||||
bool_and | bool_or | every
|
||||
bool_and | bool_or | every
|
||||
---------------------------------------------------------------------
|
||||
f | t | f
|
||||
(1 row)
|
||||
|
||||
SELECT kind, bool_and(flag), bool_or(flag), every(flag) FROM bool_test GROUP BY kind ORDER BY 2;
|
||||
kind | bool_and | bool_or | every
|
||||
kind | bool_and | bool_or | every
|
||||
---------------------------------------------------------------------
|
||||
99 | f | t | f
|
||||
88 | t | t | t
|
||||
|
@ -25,13 +25,13 @@ SELECT kind, bool_and(flag), bool_or(flag), every(flag) FROM bool_test GROUP BY
|
|||
|
||||
-- expressions in aggregate
|
||||
SELECT bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test;
|
||||
bool_or | bool_and
|
||||
bool_or | bool_and
|
||||
---------------------------------------------------------------------
|
||||
t | f
|
||||
(1 row)
|
||||
|
||||
SELECT kind, bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test GROUP BY kind ORDER BY 3;
|
||||
kind | bool_or | bool_and
|
||||
kind | bool_or | bool_and
|
||||
---------------------------------------------------------------------
|
||||
88 | t | f
|
||||
99 | t | t
|
||||
|
@ -39,13 +39,13 @@ SELECT kind, bool_or(val > 2 OR id < 2), bool_and(val < 3) FROM bool_test GROUP
|
|||
|
||||
-- 1 & 3, 1 | 3
|
||||
SELECT bit_and(val), bit_or(val) FROM bool_test WHERE flag;
|
||||
bit_and | bit_or
|
||||
bit_and | bit_or
|
||||
---------------------------------------------------------------------
|
||||
1 | 3
|
||||
(1 row)
|
||||
|
||||
SELECT flag, bit_and(val), bit_or(val) FROM bool_test GROUP BY flag ORDER BY flag;
|
||||
flag | bit_and | bit_or
|
||||
flag | bit_and | bit_or
|
||||
---------------------------------------------------------------------
|
||||
f | 2 | 2
|
||||
t | 1 | 3
|
||||
|
|
|
@ -7,9 +7,9 @@ CREATE TABLE stock (
|
|||
s_order_cnt int NOT NULL
|
||||
);
|
||||
SELECT create_distributed_table('stock','s_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
explain (costs false, summary false, timing false)
|
||||
|
@ -19,7 +19,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort
|
||||
Sort Key: s_i_id
|
||||
|
@ -65,7 +65,7 @@ from stock
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort
|
||||
Sort Key: s_i_id
|
||||
|
@ -98,7 +98,7 @@ select s_i_id, sum(s_order_cnt) as ordercount
|
|||
from stock
|
||||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
HashAggregate
|
||||
Group Key: s_i_id
|
||||
|
@ -129,7 +129,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort (cost=0.00..0.00 rows=0 width=0)
|
||||
Sort Key: remote_scan.s_i_id
|
||||
|
@ -152,7 +152,7 @@ explain select s_i_id, sum(s_order_cnt) as ordercount
|
|||
from stock s
|
||||
group by s_i_id
|
||||
having (select true);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
HashAggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
Group Key: remote_scan.s_i_id
|
||||
|
@ -175,7 +175,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -186,7 +186,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
4 | 4
|
||||
|
@ -198,7 +198,7 @@ from stock
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
4 | 4
|
||||
|
@ -211,7 +211,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
|
@ -226,7 +226,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having (select false)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -235,7 +235,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
|
@ -249,7 +249,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select false)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -258,7 +258,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
|
@ -309,9 +309,9 @@ insert into stock VALUES
|
|||
(32, 1, 1, 1, 1, 1, '', '','','','','','','','','','');
|
||||
SELECT create_distributed_table('stock','s_w_id');
|
||||
NOTICE: Copying data from local table...
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
select s_i_id, sum(s_order_cnt) as ordercount
|
||||
|
@ -327,7 +327,7 @@ having sum(s_order_cnt) >
|
|||
and s_nationkey = n_nationkey
|
||||
and n_name = 'GERMANY')
|
||||
order by ordercount desc;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
33 | 1
|
||||
1 | 1
|
||||
|
@ -348,7 +348,7 @@ having sum(s_order_cnt) >
|
|||
and s_nationkey = n_nationkey
|
||||
and n_name = 'GERMANY')
|
||||
order by ordercount desc;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 100001
|
||||
(1 row)
|
||||
|
|
|
@ -10,9 +10,9 @@ CREATE TABLE stock (
|
|||
s_order_cnt int NOT NULL
|
||||
);
|
||||
SELECT create_distributed_table('stock','s_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
|
@ -24,7 +24,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort
|
||||
Sort Key: s_i_id
|
||||
|
@ -70,7 +70,7 @@ from stock
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort
|
||||
Sort Key: s_i_id
|
||||
|
@ -103,7 +103,7 @@ select s_i_id, sum(s_order_cnt) as ordercount
|
|||
from stock
|
||||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
HashAggregate
|
||||
Group Key: s_i_id
|
||||
|
@ -134,7 +134,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Sort (cost=0.00..0.00 rows=0 width=0)
|
||||
Sort Key: remote_scan.s_i_id
|
||||
|
@ -157,7 +157,7 @@ explain select s_i_id, sum(s_order_cnt) as ordercount
|
|||
from stock s
|
||||
group by s_i_id
|
||||
having (select true);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
HashAggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
Group Key: remote_scan.s_i_id
|
||||
|
@ -180,7 +180,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -191,7 +191,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
4 | 4
|
||||
|
@ -203,7 +203,7 @@ from stock
|
|||
group by s_i_id
|
||||
having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from stock)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
4 | 4
|
||||
|
@ -216,7 +216,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
|
@ -231,7 +231,7 @@ where s_order_cnt > (select sum(s_order_cnt) * .005 as where_query from stock)
|
|||
group by s_i_id
|
||||
having (select false)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -240,7 +240,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
|
@ -254,7 +254,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select false)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -263,7 +263,7 @@ from stock s
|
|||
group by s_i_id
|
||||
having (select true)
|
||||
order by s_i_id;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 2
|
||||
|
@ -319,9 +319,9 @@ insert into stock VALUES
|
|||
(32, 1, 1, 1, 1, 1, '', '','','','','','','','','','');
|
||||
SELECT create_distributed_table('stock','s_w_id');
|
||||
NOTICE: Copying data from local table...
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
|
@ -339,7 +339,7 @@ having sum(s_order_cnt) >
|
|||
and s_nationkey = n_nationkey
|
||||
and n_name = 'GERMANY')
|
||||
order by ordercount desc;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
33 | 1
|
||||
1 | 1
|
||||
|
@ -360,7 +360,7 @@ having sum(s_order_cnt) >
|
|||
and s_nationkey = n_nationkey
|
||||
and n_name = 'GERMANY')
|
||||
order by ordercount desc;
|
||||
s_i_id | ordercount
|
||||
s_i_id | ordercount
|
||||
---------------------------------------------------------------------
|
||||
1 | 100001
|
||||
(1 row)
|
||||
|
|
|
@ -61,33 +61,33 @@ create table supplier (
|
|||
PRIMARY KEY ( su_suppkey )
|
||||
);
|
||||
SELECT create_distributed_table('order_line','ol_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('stock','s_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('item');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('nation');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('supplier');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO order_line SELECT c, c, c, c, c, NULL, c, c, c, 'abc' FROM generate_series(1, 10) as c;
|
||||
|
@ -102,7 +102,7 @@ select s_i_id
|
|||
s_i_id in (select i_id from item)
|
||||
AND s_i_id = ol_i_id
|
||||
order by s_i_id;
|
||||
s_i_id
|
||||
s_i_id
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
2
|
||||
|
@ -150,7 +150,7 @@ where su_suppkey in
|
|||
and su_nationkey = n_nationkey
|
||||
and n_name = 'Germany'
|
||||
order by su_name;
|
||||
su_name | su_address
|
||||
su_name | su_address
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -184,7 +184,7 @@ where s_suppkey in
|
|||
and s_nationkey = n_nationkey
|
||||
and n_name = 'GERMANY'
|
||||
order by s_name;
|
||||
s_name | s_address
|
||||
s_name | s_address
|
||||
---------------------------------------------------------------------
|
||||
Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R
|
||||
(1 row)
|
||||
|
@ -205,7 +205,7 @@ where s_suppkey in
|
|||
and s_nationkey = n_nationkey
|
||||
and n_name = 'GERMANY'
|
||||
order by s_name;
|
||||
s_name | s_address
|
||||
s_name | s_address
|
||||
---------------------------------------------------------------------
|
||||
Supplier#000000033 | gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R
|
||||
Supplier#000000044 | kERxlLDnlIZJdN66zAPHklyL
|
||||
|
|
|
@ -145,75 +145,75 @@ CREATE TABLE supplier (
|
|||
PRIMARY KEY ( su_suppkey )
|
||||
);
|
||||
SELECT create_distributed_table('order_line','ol_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('new_order','no_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('stock','s_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('oorder','o_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('history','h_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('customer','c_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('district','d_w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('warehouse','w_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('item');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('region');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('nation');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('supplier');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
TRUNCATE order_line, new_order, stock, oorder, history, customer, district, warehouse, item, region, nation, supplier; -- for easy copy in development
|
||||
|
@ -245,7 +245,7 @@ FROM order_line
|
|||
WHERE ol_delivery_d > '2007-01-02 00:00:00.000000'
|
||||
GROUP BY ol_number
|
||||
ORDER BY ol_number;
|
||||
ol_number | sum_qty | sum_amount | avg_qty | avg_amount | count_order
|
||||
ol_number | sum_qty | sum_amount | avg_qty | avg_amount | count_order
|
||||
---------------------------------------------------------------------
|
||||
0 | 0 | 0.00 | 0.00000000000000000000 | 0.00000000000000000000 | 1
|
||||
1 | 1 | 1.00 | 1.00000000000000000000 | 1.00000000000000000000 | 1
|
||||
|
@ -301,10 +301,10 @@ ORDER BY
|
|||
n_name,
|
||||
su_name,
|
||||
i_id;
|
||||
su_suppkey | su_name | n_name | i_id | i_name | su_address | su_phone | su_comment
|
||||
su_suppkey | su_name | n_name | i_id | i_name | su_address | su_phone | su_comment
|
||||
---------------------------------------------------------------------
|
||||
9 | abc | Germany | 3 | Keyboard | def | ghi | jkl
|
||||
4 | abc | The Netherlands | 2 | Keyboard | def | ghi | jkl
|
||||
9 | abc | Germany | 3 | Keyboard | def | ghi | jkl
|
||||
4 | abc | The Netherlands | 2 | Keyboard | def | ghi | jkl
|
||||
(2 rows)
|
||||
|
||||
-- Query 3
|
||||
|
@ -338,7 +338,7 @@ GROUP BY
|
|||
ORDER BY
|
||||
revenue DESC,
|
||||
o_entry_d;
|
||||
ol_o_id | ol_w_id | ol_d_id | revenue | o_entry_d
|
||||
ol_o_id | ol_w_id | ol_d_id | revenue | o_entry_d
|
||||
---------------------------------------------------------------------
|
||||
10 | 10 | 10 | 10.00 | Fri Oct 17 00:00:00 2008
|
||||
9 | 9 | 9 | 9.00 | Fri Oct 17 00:00:00 2008
|
||||
|
@ -369,7 +369,7 @@ WHERE o_entry_d >= '2007-01-02 00:00:00.000000'
|
|||
AND ol_delivery_d >= o_entry_d)
|
||||
GROUP BY o_ol_cnt
|
||||
ORDER BY o_ol_cnt;
|
||||
o_ol_cnt | order_count
|
||||
o_ol_cnt | order_count
|
||||
---------------------------------------------------------------------
|
||||
1 | 11
|
||||
(1 row)
|
||||
|
@ -406,7 +406,7 @@ WHERE c_id = o_c_id
|
|||
AND o_entry_d >= '2007-01-02 00:00:00.000000'
|
||||
GROUP BY n_name
|
||||
ORDER BY revenue DESC;
|
||||
n_name | revenue
|
||||
n_name | revenue
|
||||
---------------------------------------------------------------------
|
||||
Germany | 3.00
|
||||
The Netherlands | 2.00
|
||||
|
@ -419,7 +419,7 @@ FROM order_line
|
|||
WHERE ol_delivery_d >= '1999-01-01 00:00:00.000000'
|
||||
AND ol_delivery_d < '2020-01-01 00:00:00.000000'
|
||||
AND ol_quantity BETWEEN 1 AND 100000;
|
||||
revenue
|
||||
revenue
|
||||
---------------------------------------------------------------------
|
||||
55.00
|
||||
(1 row)
|
||||
|
@ -462,7 +462,7 @@ ORDER BY
|
|||
su_nationkey,
|
||||
cust_nation,
|
||||
l_year;
|
||||
supp_nation | cust_nation | l_year | revenue
|
||||
supp_nation | cust_nation | l_year | revenue
|
||||
---------------------------------------------------------------------
|
||||
9 | C | 2008 | 3.00
|
||||
(1 row)
|
||||
|
@ -501,7 +501,7 @@ WHERE i_id = s_i_id
|
|||
AND i_id = ol_i_id
|
||||
GROUP BY extract(YEAR FROM o_entry_d)
|
||||
ORDER BY l_year;
|
||||
l_year | mkt_share
|
||||
l_year | mkt_share
|
||||
---------------------------------------------------------------------
|
||||
2008 | 0.50000000000000000000
|
||||
(1 row)
|
||||
|
@ -533,7 +533,7 @@ GROUP BY
|
|||
ORDER BY
|
||||
n_name,
|
||||
l_year DESC;
|
||||
n_name | l_year | sum_profit
|
||||
n_name | l_year | sum_profit
|
||||
---------------------------------------------------------------------
|
||||
Germany | 2008 | 3.00
|
||||
The Netherlands | 2008 | 2.00
|
||||
|
@ -569,19 +569,19 @@ GROUP BY
|
|||
c_phone,
|
||||
n_name
|
||||
ORDER BY revenue DESC;
|
||||
c_id | c_last | revenue | c_city | c_phone | n_name
|
||||
c_id | c_last | revenue | c_city | c_phone | n_name
---------------------------------------------------------------------
10 | John | 10.00 | Some City | +1 000 0000000 | Cambodia
9 | John | 9.00 | Some City | +1 000 0000000 | Cambodia
8 | John | 8.00 | Some City | +1 000 0000000 | Cambodia
7 | John | 7.00 | Some City | +1 000 0000000 | Cambodia
6 | John | 6.00 | Some City | +1 000 0000000 | Cambodia
5 | John | 5.00 | Some City | +1 000 0000000 | Cambodia
4 | John | 4.00 | Some City | +1 000 0000000 | Cambodia
3 | John | 3.00 | Some City | +1 000 0000000 | Cambodia
2 | John | 2.00 | Some City | +1 000 0000000 | Cambodia
1 | John | 1.00 | Some City | +1 000 0000000 | Cambodia
0 | John | 0.00 | Some City | +1 000 0000000 | Cambodia
(11 rows)

-- Query 11

@ -606,7 +606,7 @@ HAVING sum(s_order_cnt) >
AND su_nationkey = n_nationkey
AND n_name = 'Germany')
ORDER BY ordercount DESC;
s_i_id | ordercount
---------------------------------------------------------------------
3 | 3
(1 row)

@ -626,7 +626,7 @@ WHERE ol_w_id = o_w_id
AND ol_delivery_d < '2020-01-01 00:00:00.000000'
GROUP BY o_ol_cnt
ORDER BY o_ol_cnt;
o_ol_cnt | high_line_count | low_line_count
---------------------------------------------------------------------
1 | 2 | 9
(1 row)

@ -649,7 +649,7 @@ GROUP BY c_count
ORDER BY
custdist DESC,
c_count DESC;
c_count | custdist
---------------------------------------------------------------------
0 | 9
1 | 2

@ -664,7 +664,7 @@ FROM
WHERE ol_i_id = i_id
AND ol_delivery_d >= '2007-01-02 00:00:00.000000'
AND ol_delivery_d < '2020-01-02 00:00:00.000000';
promo_revenue
---------------------------------------------------------------------
0.00000000000000000000
(1 row)

@ -693,7 +693,7 @@ FROM
WHERE su_suppkey = supplier_no
AND total_revenue = (SELECT max(total_revenue) FROM revenue)
ORDER BY su_suppkey;
su_suppkey | su_name | su_address | su_phone | total_revenue
---------------------------------------------------------------------
9 | abc | def | ghi | 3.00
(1 row)

@ -718,7 +718,7 @@ GROUP BY
substr(i_data, 1, 3),
i_price
ORDER BY supplier_cnt DESC;
i_name | brand | i_price | supplier_cnt
---------------------------------------------------------------------
Keyboard | co | 50.00 | 3
(1 row)

@ -738,7 +738,7 @@ FROM
AND ol_i_id = i_id
GROUP BY i_id) t
WHERE ol_i_id = t.i_id;
avg_yearly
---------------------------------------------------------------------
27.5000000000000000
(1 row)

@ -775,7 +775,7 @@ HAVING sum(ol_amount) > 5 -- was 200, but thats too big for the dataset
ORDER BY
sum(ol_amount) DESC,
o_entry_d;
c_last | o_id | o_entry_d | o_ol_cnt | sum
---------------------------------------------------------------------
John | 10 | Fri Oct 17 00:00:00 2008 | 1 | 10.00
John | 9 | Fri Oct 17 00:00:00 2008 | 1 | 9.00

@ -808,7 +808,7 @@ WHERE ( ol_i_id = i_id
AND ol_quantity <= 10
AND i_price BETWEEN 1 AND 400000
AND ol_w_id IN (1,5,3));
revenue
---------------------------------------------------------------------
7.00
(1 row)

@ -837,7 +837,7 @@ WHERE su_suppkey in
AND su_nationkey = n_nationkey
AND n_name = 'Germany'
ORDER BY su_name;
su_name | su_address
---------------------------------------------------------------------
abc | def
(1 row)

@ -872,7 +872,7 @@ GROUP BY su_name
ORDER BY
numwait desc,
su_name;
su_name | numwait
---------------------------------------------------------------------
(0 rows)

@ -895,7 +895,7 @@ WHERE substr(c_phone,1,1) in ('1','2','3','4','5','6','7')
AND o_d_id = c_d_id)
GROUP BY substr(c_state,1,1)
ORDER BY substr(c_state,1,1);
country | numcust | totacctbal
---------------------------------------------------------------------
(0 rows)

@ -4,14 +4,14 @@ SET search_path TO coordinator_shouldhaveshards;
-- idempotently add node to allow this test to run without add_coordinator
SET client_min_messages TO WARNING;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)

RESET client_min_messages;
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
?column?
---------------------------------------------------------------------
1
(1 row)

@ -19,14 +19,14 @@ SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhavesha
SET citus.shard_replication_factor TO 1;
CREATE TABLE test (x int, y int);
SELECT create_distributed_table('test','x', colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT count(*) FROM pg_dist_shard JOIN pg_dist_placement USING (shardid)
WHERE logicalrelid = 'test'::regclass AND groupid = 0;
count
---------------------------------------------------------------------
2
(1 row)

@ -36,20 +36,20 @@ INSERT INTO test SELECT s,s FROM generate_series(2,100) s;
-- router queries execute locally
INSERT INTO test VALUES (1, 1);
SELECT y FROM test WHERE x = 1;
y
---------------------------------------------------------------------
1
(1 row)

-- multi-shard queries connect to localhost
SELECT count(*) FROM test;
count
---------------------------------------------------------------------
100
(1 row)

WITH a AS (SELECT * FROM test) SELECT count(*) FROM test;
count
---------------------------------------------------------------------
100
(1 row)

@ -57,13 +57,13 @@ WITH a AS (SELECT * FROM test) SELECT count(*) FROM test;
-- multi-shard queries in transaction blocks execute locally
BEGIN;
SELECT y FROM test WHERE x = 1;
y
---------------------------------------------------------------------
1
(1 row)

SELECT count(*) FROM test;
count
---------------------------------------------------------------------
100
(1 row)

@ -71,13 +71,13 @@ SELECT count(*) FROM test;
END;
BEGIN;
SELECT y FROM test WHERE x = 1;
y
---------------------------------------------------------------------
1
(1 row)

SELECT count(*) FROM test;
count
---------------------------------------------------------------------
100
(1 row)

@ -88,7 +88,7 @@ ALTER TABLE test ADD COLUMN z int;
-- DDL after local execution
BEGIN;
SELECT y FROM test WHERE x = 1;
y
---------------------------------------------------------------------
1
(1 row)

@ -101,7 +101,7 @@ ROLLBACK;
BEGIN;
ALTER TABLE test DROP COLUMN z;
SELECT y FROM test WHERE x = 1;
y
---------------------------------------------------------------------
1
(1 row)

@ -111,7 +111,7 @@ DELETE FROM test;
DROP TABLE test;
DROP SCHEMA coordinator_shouldhaveshards CASCADE;
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false);
?column?
---------------------------------------------------------------------
1
(1 row)

@ -4,18 +4,18 @@ CREATE TABLE tt1(id int, value_1 int);
INSERT INTO tt1 VALUES(1,2),(2,3),(3,4);
SELECT create_distributed_table('tt1','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE tt2(id int, value_1 int);
INSERT INTO tt2 VALUES(3,3),(4,4),(5,5);
SELECT create_distributed_table('tt2','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE tt3(id int, json_val json);

@ -40,7 +40,7 @@ SET value_1 = abs(2 + 3.5)
FROM cte_1
WHERE cte_1.id = tt1.id;
SELECT * FROM tt1 ORDER BY id;
id | value_1
---------------------------------------------------------------------
1 | 2
2 | 6

@ -64,7 +64,7 @@ WITH cte_1 AS (
UPDATE tt1
SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1);
SELECT * FROM tt1 ORDER BY id;
id | value_1
---------------------------------------------------------------------
1 | 9
2 | 9

@ -88,7 +88,7 @@ WITH cte_1(id) AS (
UPDATE tt1
SET value_1 = (SELECT max(id) + abs(2 + 3.5) FROM cte_1);
SELECT * FROM tt1 ORDER BY id;
id | value_1
---------------------------------------------------------------------
1 | 9
2 | 9

@ -114,7 +114,7 @@ DELETE FROM tt1
USING cte_1
WHERE tt1.id < cte_1.id;
SELECT * FROM tt1 ORDER BY id;
id | value_1
---------------------------------------------------------------------
3 | 4
(1 row)

@ -134,7 +134,7 @@ DELETE FROM tt1
USING cte_1
WHERE tt1.id < cte_1.id;
SELECT * FROM tt1 ORDER BY id;
id | value_1
---------------------------------------------------------------------
(0 rows)

@ -4,18 +4,18 @@ CREATE TABLE tt1(id int, value_1 int);
INSERT INTO tt1 VALUES(1,2),(2,3),(3,4);
SELECT create_distributed_table('tt1','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE tt2(id int, value_1 int);
INSERT INTO tt2 VALUES(3,3),(4,4),(5,5);
SELECT create_distributed_table('tt2','id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- Test with prepared statements (parameter used by SET)

@ -14,15 +14,15 @@ SET citus.shard_count TO 4;
CREATE TABLE raw_table (day date, user_id int);
CREATE TABLE daily_uniques(day date, unique_users hll);
SELECT create_distributed_table('raw_table', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('daily_uniques', 'day');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO raw_table

@ -38,7 +38,7 @@ SELECT hll_cardinality(hll_union_agg(agg))
FROM (
SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg
FROM raw_table)a;
hll_cardinality
---------------------------------------------------------------------
19
(1 row)

@ -54,7 +54,7 @@ FROM daily_uniques
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 2 DESC,1
LIMIT 10;
day | hll_cardinality
---------------------------------------------------------------------
06-20-2018 | 19
06-21-2018 | 19

@ -72,7 +72,7 @@ LIMIT 10;
SELECT hll_cardinality(hll_union_agg(unique_users))
FROM daily_uniques
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date;
hll_cardinality
---------------------------------------------------------------------
19
(1 row)

@ -82,7 +82,7 @@ FROM daily_uniques
WHERE day >= '2018-06-23' AND day <= '2018-07-01'
GROUP BY 1
ORDER BY 1;
month | hll_cardinality
---------------------------------------------------------------------
6 | 19
7 | 13

@ -108,7 +108,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -142,7 +142,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -177,7 +177,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -211,7 +211,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -246,7 +246,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -280,7 +280,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -315,7 +315,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -350,7 +350,7 @@ FROM
daily_uniques
GROUP BY(1)
HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Task Count: 4

@ -395,15 +395,15 @@ WHERE name = 'topn'
CREATE TABLE customer_reviews (day date, user_id int, review int);
CREATE TABLE popular_reviewer(day date, reviewers jsonb);
SELECT create_distributed_table('customer_reviews', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('popular_reviewer', 'day');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO customer_reviews

@ -421,7 +421,7 @@ FROM (
FROM customer_reviews
)a
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 7843
2 | 7843

@ -446,7 +446,7 @@ FROM popular_reviewer
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 3 DESC, 1, 2
LIMIT 10;
day | item | frequency
---------------------------------------------------------------------
06-20-2018 | 1 | 248
06-20-2018 | 2 | 248

@ -468,7 +468,7 @@ FROM (
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date
)a
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 1240
2 | 1240

@ -488,7 +488,7 @@ FROM (
ORDER BY 1
)a
ORDER BY 1, 3 DESC, 2;
month | item | frequency
---------------------------------------------------------------------
6 | 1 | 1054
6 | 2 | 1054

@ -9,7 +9,7 @@ AS create_cmd FROM pg_available_extensions()
WHERE name = 'hll'
\gset
:create_cmd;
hll_present
---------------------------------------------------------------------
f
(1 row)

@ -19,50 +19,50 @@ CREATE TABLE raw_table (day date, user_id int);
CREATE TABLE daily_uniques(day date, unique_users hll);
ERROR: type "hll" does not exist
SELECT create_distributed_table('raw_table', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('daily_uniques', 'day');
ERROR: relation "daily_uniques" does not exist
INSERT INTO raw_table
SELECT day, user_id % 19
FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day),
generate_series(1,100) as g(user_id);
INSERT INTO raw_table
SELECT day, user_id % 13
FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day),
generate_series(1,100) as g(user_id);
-- Run hll on raw data
SELECT hll_cardinality(hll_union_agg(agg))
FROM (
SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg
FROM raw_table)a;
ERROR: function hll_hash_integer(integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- Aggregate the data into daily_uniques
INSERT INTO daily_uniques
SELECT day, hll_add_agg(hll_hash_integer(user_id))
FROM raw_table
GROUP BY 1;
ERROR: relation "daily_uniques" does not exist
-- Basic hll_cardinality check on aggregated data
SELECT day, hll_cardinality(unique_users)
FROM daily_uniques
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 2 DESC,1
LIMIT 10;
ERROR: relation "daily_uniques" does not exist
-- Union aggregated data for one week
SELECT hll_cardinality(hll_union_agg(unique_users))
FROM daily_uniques
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date;
ERROR: relation "daily_uniques" does not exist
SELECT EXTRACT(MONTH FROM day) AS month, hll_cardinality(hll_union_agg(unique_users))
FROM daily_uniques
WHERE day >= '2018-06-23' AND day <= '2018-07-01'
GROUP BY 1
ORDER BY 1;
ERROR: relation "daily_uniques" does not exist
-- These are going to be supported after window function support

@ -156,7 +156,7 @@ AS create_topn FROM pg_available_extensions()
WHERE name = 'topn'
\gset
:create_topn;
topn_present
---------------------------------------------------------------------
f
(1 row)

@ -164,51 +164,51 @@ WHERE name = 'topn'
CREATE TABLE customer_reviews (day date, user_id int, review int);
CREATE TABLE popular_reviewer(day date, reviewers jsonb);
SELECT create_distributed_table('customer_reviews', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('popular_reviewer', 'day');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO customer_reviews
SELECT day, user_id % 7, review % 5
FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day),
generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review);
INSERT INTO customer_reviews
SELECT day, user_id % 13, review % 3
FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day),
generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review);
-- Run topn on raw data
SELECT (topn(agg, 10)).*
FROM (
SELECT topn_add_agg(user_id::text) AS agg
FROM customer_reviews
)a
ORDER BY 2 DESC, 1;
ERROR: function topn_add_agg(text) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- Aggregate the data into popular_reviewer
INSERT INTO popular_reviewer
SELECT day, topn_add_agg(user_id::text)
FROM customer_reviews
GROUP BY 1;
ERROR: function topn_add_agg(text) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- Basic topn check on aggregated data
SELECT day, (topn(reviewers, 10)).*
FROM popular_reviewer
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 3 DESC, 1, 2
LIMIT 10;
ERROR: function topn(jsonb, integer) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- Union aggregated data for one week
SELECT (topn(agg, 10)).*
FROM (
SELECT topn_union_agg(reviewers) AS agg
FROM popular_reviewer

@ -217,7 +217,7 @@ FROM (
ORDER BY 2 DESC, 1;
ERROR: function topn_union_agg(jsonb) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
SELECT month, (topn(agg, 5)).*
FROM (
SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(reviewers) AS agg
FROM popular_reviewer

@ -230,7 +230,7 @@ ERROR: function topn_union_agg(jsonb) does not exist
HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-- TODO the following queries will be supported after we fix #2265
-- They work for PG9.6 but not for PG10
SELECT (topn(topn_union_agg(reviewers), 10)).*
FROM popular_reviewer
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date
ORDER BY 2 DESC, 1;

@ -14,47 +14,47 @@ SET citus.shard_count TO 4;
CREATE TABLE raw_table (day date, user_id int);
CREATE TABLE daily_uniques(day date, unique_users hll);
SELECT create_distributed_table('raw_table', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('daily_uniques', 'day');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO raw_table
SELECT day, user_id % 19
FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day),
generate_series(1,100) as g(user_id);
INSERT INTO raw_table
SELECT day, user_id % 13
FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day),
generate_series(1,100) as g(user_id);
-- Run hll on raw data
SELECT hll_cardinality(hll_union_agg(agg))
FROM (
SELECT hll_add_agg(hll_hash_integer(user_id)) AS agg
FROM raw_table)a;
hll_cardinality
---------------------------------------------------------------------
19
(1 row)

-- Aggregate the data into daily_uniques
INSERT INTO daily_uniques
SELECT day, hll_add_agg(hll_hash_integer(user_id))
FROM raw_table
GROUP BY 1;
-- Basic hll_cardinality check on aggregated data
SELECT day, hll_cardinality(unique_users)
FROM daily_uniques
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 2 DESC,1
LIMIT 10;
day | hll_cardinality
---------------------------------------------------------------------
06-20-2018 | 19
06-21-2018 | 19

@ -69,10 +69,10 @@ LIMIT 10;
(10 rows)

-- Union aggregated data for one week
SELECT hll_cardinality(hll_union_agg(unique_users))
FROM daily_uniques
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date;
hll_cardinality
---------------------------------------------------------------------
19
(1 row)

@ -80,9 +80,9 @@ WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date;
SELECT EXTRACT(MONTH FROM day) AS month, hll_cardinality(hll_union_agg(unique_users))
FROM daily_uniques
WHERE day >= '2018-06-23' AND day <= '2018-07-01'
GROUP BY 1
ORDER BY 1;
month | hll_cardinality
---------------------------------------------------------------------
6 | 19
7 | 13

@ -108,7 +108,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Group Key: remote_scan.day

@ -144,7 +144,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
GroupAggregate
Group Key: remote_scan.day

@ -183,7 +183,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Group Key: remote_scan.day

@ -219,7 +219,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
GroupAggregate
Group Key: remote_scan.day

@ -258,7 +258,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Group Key: remote_scan.day

@ -294,7 +294,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
GroupAggregate
Group Key: remote_scan.day

@ -333,7 +333,7 @@ SELECT
FROM
daily_uniques
GROUP BY(1);
QUERY PLAN
---------------------------------------------------------------------
HashAggregate
Group Key: remote_scan.day

@ -370,7 +370,7 @@ FROM
daily_uniques
GROUP BY(1)
HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
QUERY PLAN
---------------------------------------------------------------------
GroupAggregate
Group Key: remote_scan.day

@ -428,33 +428,33 @@ WHERE name = 'topn'
CREATE TABLE customer_reviews (day date, user_id int, review int);
CREATE TABLE popular_reviewer(day date, reviewers jsonb);
SELECT create_distributed_table('customer_reviews', 'user_id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('popular_reviewer', 'day');
create_distributed_table
---------------------------------------------------------------------

(1 row)

INSERT INTO customer_reviews
SELECT day, user_id % 7, review % 5
FROM generate_series('2018-05-24'::timestamp, '2018-06-24'::timestamp, '1 day'::interval) as f(day),
generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review);
INSERT INTO customer_reviews
SELECT day, user_id % 13, review % 3
FROM generate_series('2018-06-10'::timestamp, '2018-07-10'::timestamp, '1 day'::interval) as f(day),
generate_series(1,30) as g(user_id), generate_series(0,30) AS r(review);
-- Run topn on raw data
SELECT (topn(agg, 10)).*
FROM (
SELECT topn_add_agg(user_id::text) AS agg
FROM customer_reviews
)a
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 7843
2 | 7843

@ -469,17 +469,17 @@ ORDER BY 2 DESC, 1;
(10 rows)

-- Aggregate the data into popular_reviewer
INSERT INTO popular_reviewer
SELECT day, topn_add_agg(user_id::text)
FROM customer_reviews
GROUP BY 1;
-- Basic topn check on aggregated data
SELECT day, (topn(reviewers, 10)).*
FROM popular_reviewer
WHERE day >= '2018-06-20' and day <= '2018-06-30'
ORDER BY 3 DESC, 1, 2
LIMIT 10;
day | item | frequency
---------------------------------------------------------------------
06-20-2018 | 1 | 248
06-20-2018 | 2 | 248

@ -494,14 +494,14 @@ LIMIT 10;
(10 rows)

-- Union aggregated data for one week
SELECT (topn(agg, 10)).*
FROM (
SELECT topn_union_agg(reviewers) AS agg
FROM popular_reviewer
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date
)a
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 1240
2 | 1240

@ -512,7 +512,7 @@ ORDER BY 2 DESC, 1;
6 | 992
(7 rows)

SELECT month, (topn(agg, 5)).*
FROM (
SELECT EXTRACT(MONTH FROM day) AS month, topn_union_agg(reviewers) AS agg
FROM popular_reviewer

@ -521,7 +521,7 @@ FROM (
ORDER BY 1
)a
ORDER BY 1, 3 DESC, 2;
month | item | frequency
---------------------------------------------------------------------
6 | 1 | 1054
6 | 2 | 1054

@ -537,11 +537,11 @@ ORDER BY 1, 3 DESC, 2;

-- TODO the following queries will be supported after we fix #2265
-- They work for PG9.6 but not for PG10
SELECT (topn(topn_union_agg(reviewers), 10)).*
FROM popular_reviewer
WHERE day >= '2018-05-24'::date AND day <= '2018-05-31'::date
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 1240
2 | 1240

@ -555,7 +555,7 @@ ORDER BY 2 DESC, 1;
SELECT (topn(topn_add_agg(user_id::text), 10)).*
FROM customer_reviews
ORDER BY 2 DESC, 1;
item | frequency
---------------------------------------------------------------------
1 | 7843
2 | 7843

@ -9,9 +9,9 @@ SET search_path TO disabled_object_propagation;
-- verify the table gets created, which requires schema distribution to still work
CREATE TABLE t1 (a int PRIMARY KEY , b int);
SELECT create_distributed_table('t1','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- verify types are not created, preventing distributed tables to be created unless created manually on the workers

@ -26,16 +26,16 @@ SELECT 1 FROM run_command_on_workers($$
CREATE TYPE disabled_object_propagation.tt1 AS (a int , b int);
COMMIT;
$$);
?column?
---------------------------------------------------------------------
1
1
(2 rows)

SELECT create_distributed_table('t2', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- verify enum types are not created, preventing distributed tables to be created unless created manually on the workers

@ -50,16 +50,16 @@ SELECT 1 FROM run_command_on_workers($$
CREATE TYPE disabled_object_propagation.tt2 AS ENUM ('a', 'b');
COMMIT;
$$);
?column?
---------------------------------------------------------------------
1
1
(2 rows)

SELECT create_distributed_table('t3', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- verify ALTER TYPE statements are not propagated for types, even though they are marked distributed

@ -69,16 +69,16 @@ SET LOCAL citus.enable_object_propagation TO on;
CREATE TYPE tt3 AS (a int, b int);
CREATE TABLE t4 (a int PRIMARY KEY, b tt3);
SELECT create_distributed_table('t4','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

DROP TABLE t4; -- as long as the table is using the type some operations are hard to force
COMMIT;
-- verify the type is distributed
SELECT count(*) FROM citus.pg_dist_object WHERE objid = 'disabled_object_propagation.tt3'::regtype::oid;
count
---------------------------------------------------------------------
1
(1 row)

@ -97,7 +97,7 @@ SELECT row(nspname, typname, usename)
JOIN pg_namespace ON (pg_namespace.oid = typnamespace)
WHERE typname = 'tt3';
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(disabled_object_propagation,tt3,postgres)")
(localhost,57638,t,"(disabled_object_propagation,tt3,postgres)")

@ -112,7 +112,7 @@ SELECT run_command_on_workers($$
WHERE pg_type.typname = 'tt3'
GROUP BY pg_type.typname;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(tt3,""a int4, b int4"")")
(localhost,57638,t,"(tt3,""a int4, b int4"")")

@ -3,7 +3,7 @@ CREATE USER collationuser;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SELECT run_command_on_workers($$CREATE USER collationuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")

@ -23,7 +23,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
JOIN pg_authid a ON a.oid = c.collowner
WHERE collname like 'german_phonebook%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
german_phonebook | collation_tests | postgres
(1 row)

@ -35,20 +35,20 @@ CREATE TABLE test_propagate(id int, t1 text COLLATE german_phonebook,
INSERT INTO test_propagate VALUES (1, 'aesop', U&'\00E4sop'), (2, U&'Vo\1E9Er', 'Vossr');
SELECT create_distributed_table('test_propagate', 'id');
NOTICE: Copying data from local table...
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- Test COLLATE is pushed down
SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b';
id | t1 | t2
---------------------------------------------------------------------
1 | aesop | äsop
(1 row)

SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b' COLLATE "C";
id | t1 | t2
---------------------------------------------------------------------
2 | Voẞr | Vossr
(1 row)

@ -56,9 +56,9 @@ SELECT * FROM collation_tests.test_propagate WHERE t2 < 'b' COLLATE "C";
-- Test range table with collated distribution column
CREATE TABLE test_range(key text COLLATE german_phonebook, val int);
SELECT create_distributed_table('test_range', 'key', 'range');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT master_create_empty_shard('test_range') AS new_shard_id

@ -76,7 +76,7 @@ SET client_min_messages TO debug;
SELECT * FROM test_range WHERE key > 'Ab' AND key < U&'\00E4z';
DEBUG: Creating router plan
DEBUG: Plan is router executable
key | val
---------------------------------------------------------------------
äsop | 1
(1 row)

@ -88,7 +88,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
JOIN pg_authid a ON a.oid = c.collowner
WHERE collname like 'german_phonebook%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
german_phonebook | collation_tests | postgres
german_phonebook_unpropagated | collation_tests | postgres

@ -105,7 +105,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
JOIN pg_authid a ON a.oid = c.collowner
WHERE collname like 'german_phonebook%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
german_phonebook2 | collation_tests2 | collationuser
german_phonebook_unpropagated | collation_tests | postgres

@ -127,7 +127,7 @@ DROP SCHEMA collation_tests2 CASCADE;
\c - - - :master_port
DROP USER collationuser;
SELECT run_command_on_workers($$DROP USER collationuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")

@ -1,6 +1,6 @@
CREATE SCHEMA collation_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")

@ -20,9 +20,9 @@ CREATE COLLATION caseinsensitive (
);
CREATE TABLE tblcoll(val text COLLATE caseinsensitive);
SELECT create_reference_table('tblcoll');
create_reference_table
---------------------------------------------------------------------

(1 row)

\c - - - :worker_1_port

@ -32,7 +32,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
JOIN pg_authid a ON a.oid = c.collowner
WHERE collname like 'caseinsensitive%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
caseinsensitive | collation_conflict | postgres
(1 row)

@ -58,9 +58,9 @@ CREATE COLLATION caseinsensitive (
);
CREATE TABLE tblcoll(val text COLLATE caseinsensitive);
SELECT create_reference_table('tblcoll');
create_reference_table
---------------------------------------------------------------------

(1 row)

\c - - - :worker_1_port

@ -70,7 +70,7 @@ JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
JOIN pg_authid a ON a.oid = c.collowner
WHERE collname like 'caseinsensitive%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
caseinsensitive | collation_conflict | postgres
caseinsensitive(citus_backup_0) | collation_conflict | postgres

@ -80,13 +80,13 @@ ORDER BY 1,2,3;
SET search_path TO collation_conflict;
-- now test worker_create_or_replace_object directly
SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$);
worker_create_or_replace_object
---------------------------------------------------------------------
f
(1 row)

SELECT worker_create_or_replace_object($$CREATE COLLATION collation_conflict.caseinsensitive (provider = 'icu', lc_collate = 'und-u-ks-level2', lc_ctype = 'und-u-ks-level2')$$);
worker_create_or_replace_object
---------------------------------------------------------------------
f
(1 row)

@ -3,7 +3,7 @@ CREATE USER functionuser;
|
|||
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
|
||||
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
|
||||
SELECT run_command_on_workers($$CREATE USER functionuser;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"CREATE ROLE")
|
||||
(localhost,57638,t,"CREATE ROLE")
|
||||
|
@ -130,9 +130,9 @@ CREATE TABLE statement_table(id int2);
|
|||
SET citus.replication_model TO 'statement';
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('statement_table','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- create a table uses streaming-based replication (can be synced)
|
||||
|
@ -140,15 +140,15 @@ CREATE TABLE streaming_table(id int);
|
|||
SET citus.replication_model TO 'streaming';
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('streaming_table','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
-- at the start of the test
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
bool_or
|
||||
bool_or
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
@ -156,21 +156,21 @@ select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'pr
|
|||
-- if not paremeters are supplied, we'd see that function doesn't have
|
||||
-- distribution_argument_index and colocationid
|
||||
SELECT create_distributed_function('"add_mi''xed_param_names"(int, int)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT distribution_argument_index is NULL, colocationid is NULL from citus.pg_dist_object
|
||||
WHERE objid = 'add_mi''xed_param_names(int, int)'::regprocedure;
|
||||
?column? | ?column?
|
||||
?column? | ?column?
|
||||
---------------------------------------------------------------------
|
||||
t | t
|
||||
(1 row)
|
||||
|
||||
-- also show that we can use the function
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_names"(2,3);') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | 5
|
||||
localhost | 57638 | t | 5
|
||||
|
@ -179,7 +179,7 @@ SELECT * FROM run_command_on_workers('SELECT function_tests."add_mi''xed_param_n
|
|||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
-- since the function doesn't have a parameter
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
bool_or
|
||||
bool_or
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
@ -202,54 +202,54 @@ HINT: Set citus.replication_model to 'streaming' before creating distributed ta
|
|||
END;
|
||||
-- try to co-locate with a table that uses streaming replication
|
||||
SELECT create_distributed_function('dup(int)', '$1', colocate_with := 'streaming_table');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests.dup(42);') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | (42,"42 is text")
|
||||
localhost | 57638 | t | (42,"42 is text")
|
||||
(2 rows)
|
||||
|
||||
SELECT create_distributed_function('add(int,int)', '$1', colocate_with := 'streaming_table');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | 5
|
||||
localhost | 57638 | t | 5
|
||||
(2 rows)
|
||||
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- distribute aggregate
|
||||
SELECT create_distributed_function('sum2(int)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('my_rank("any")');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('agg_names(dup_result,dup_result)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- testing alter statements for a distributed function
|
||||
|
@ -257,21 +257,21 @@ SELECT create_distributed_function('agg_names(dup_result,dup_result)');
|
|||
-- ERROR: ROWS is not applicable when function does not return a set
|
||||
ALTER FUNCTION add(int,int) CALLED ON NULL INPUT IMMUTABLE SECURITY INVOKER PARALLEL UNSAFE LEAKPROOF COST 5;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) RETURNS NULL ON NULL INPUT STABLE SECURITY DEFINER PARALLEL RESTRICTED;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) STRICT VOLATILE PARALLEL SAFE;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -279,49 +279,49 @@ SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
|||
-- Test SET/RESET for alter function
|
||||
ALTER FUNCTION add(int,int) SET client_min_messages TO warning;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) SET client_min_messages TO error;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) SET client_min_messages TO debug;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) RESET client_min_messages;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) SET "citus.setting;'" TO 'hello '' world';
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) RESET "citus.setting;'";
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER FUNCTION add(int,int) SET search_path TO 'sch'';ma', public;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -332,7 +332,7 @@ ALTER FUNCTION add(int,int) SET client_min_messages FROM CURRENT;
|
|||
ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function
|
||||
HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value.
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -341,7 +341,7 @@ ALTER FUNCTION add(int,int) RETURNS NULL ON NULL INPUT SET client_min_messages F
|
|||
ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function
|
||||
HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value.
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -350,7 +350,7 @@ ALTER FUNCTION add(int,int) SET client_min_messages FROM CURRENT SECURITY DEFINE
|
|||
ERROR: unsupported ALTER FUNCTION ... SET ... FROM CURRENT for a distributed function
|
||||
HINT: SET FROM CURRENT is not supported for distributed functions, instead use the SET ... TO ... syntax with a constant value.
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -358,20 +358,20 @@ SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
|||
-- rename function and make sure the new name can be used on the workers while the old name can't
|
||||
ALTER FUNCTION add(int,int) RENAME TO add2;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add2(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist
|
||||
localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests.add2(2,3);') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | 5
|
||||
localhost | 57638 | t | 5
|
||||
|
@@ -380,7 +380,7 @@ SELECT * FROM run_command_on_workers('SELECT function_tests.add2(2,3);') ORDER B
ALTER FUNCTION add2(int,int) RENAME TO add;
ALTER AGGREGATE sum2(int) RENAME TO sum27;
SELECT * FROM run_command_on_workers($$SELECT 1 from pg_proc where proname = 'sum27';$$) ORDER BY 1,2;
nodename | nodeport | success | result
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 1
localhost | 57638 | t | 1
@@ -390,7 +390,7 @@ ALTER AGGREGATE sum27(int) RENAME TO sum2;
-- change the owner of the function and verify the owner has been changed on the workers
ALTER FUNCTION add(int,int) OWNER TO functionuser;
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
verify_function_is_same_on_workers
verify_function_is_same_on_workers
---------------------------------------------------------------------
t
(1 row)
@@ -403,7 +403,7 @@ JOIN pg_user ON (usesysid = proowner)
JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_tests')
WHERE proname = 'add';
$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(functionuser,function_tests,add)")
(localhost,57638,t,"(functionuser,function_tests,add)")
@@ -416,7 +416,7 @@ JOIN pg_user ON (usesysid = proowner)
JOIN pg_namespace ON (pg_namespace.oid = pronamespace and nspname = 'function_tests')
WHERE proname = 'sum2';
$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(functionuser,function_tests,sum2)")
(localhost,57638,t,"(functionuser,function_tests,sum2)")
@@ -426,20 +426,20 @@ $$);
-- the new schema has the function.
ALTER FUNCTION add(int,int) SET SCHEMA function_tests2;
SELECT public.verify_function_is_same_on_workers('function_tests2.add(int,int)');
verify_function_is_same_on_workers
verify_function_is_same_on_workers
---------------------------------------------------------------------
t
(1 row)

SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2;
nodename | nodeport | success | result
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist
localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist
(2 rows)

SELECT * FROM run_command_on_workers('SELECT function_tests2.add(2,3);') ORDER BY 1,2;
nodename | nodeport | success | result
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 5
localhost | 57638 | t | 5
@ -454,13 +454,13 @@ AS 'select $1 * $2;' -- I know, this is not an add, but the output will tell us
|
|||
IMMUTABLE
|
||||
RETURNS NULL ON NULL INPUT;
|
||||
SELECT public.verify_function_is_same_on_workers('function_tests.add(int,int)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | 6
|
||||
localhost | 57638 | t | 6
|
||||
|
@ -477,7 +477,7 @@ DETAIL: Function "pg_catalog.citus_drop_trigger()" has a dependency on extensio
|
|||
DROP FUNCTION add(int,int);
|
||||
-- call should fail as function should have been dropped
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | f | ERROR: function function_tests.add(integer, integer) does not exist
|
||||
localhost | 57638 | f | ERROR: function function_tests.add(integer, integer) does not exist
|
||||
|
@ -486,7 +486,7 @@ SELECT * FROM run_command_on_workers('SELECT function_tests.add(2,3);') ORDER BY
|
|||
DROP AGGREGATE function_tests2.sum2(int);
|
||||
-- call should fail as aggregate should have been dropped
|
||||
SELECT * FROM run_command_on_workers('SELECT function_tests2.sum2(id) FROM (select 1 id, 2) subq;') ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | f | ERROR: function function_tests2.sum2(integer) does not exist
|
||||
localhost | 57638 | f | ERROR: function function_tests2.sum2(integer) does not exist
|
||||
|
@ -498,10 +498,10 @@ ERROR: syntax error at or near "int"
|
|||
CONTEXT: invalid type name "val1 int"
|
||||
-- invalid distribution_arg_name
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='test');
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function()
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='int');
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function()
|
||||
-- invalid distribution_arg_index
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', '$0');
|
||||
|
@ -520,7 +520,7 @@ SELECT create_distributed_function('add_with_param_names(int, int)', '$1a');
|
|||
ERROR: invalid input syntax for integer: "1a"
|
||||
-- non existing column name
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', 'aaa');
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function()
|
||||
-- NULL function
|
||||
SELECT create_distributed_function(NULL);
|
||||
|
@ -532,21 +532,21 @@ ERROR: colocate_with parameter should not be NULL
|
|||
HINT: To use the default value, set colocate_with option to "default"
|
||||
-- empty string distribution_arg_index
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', '');
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function()
|
||||
-- The first distributed function syncs the metadata to nodes
|
||||
-- and metadata syncing is not supported within transaction blocks
|
||||
BEGIN;
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ROLLBACK;
|
||||
-- make sure that none of the nodes have the function because we've rollbacked
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,0)
|
||||
(localhost,57638,t,0)
|
||||
|
@ -554,28 +554,28 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_
|
|||
|
||||
-- make sure that none of the active and primary nodes hasmetadata
|
||||
select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
bool_or
|
||||
bool_or
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- valid distribution with distribution_arg_name
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='val1');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- make sure that the primary nodes are now metadata synced
|
||||
select bool_and(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
bool_and
|
||||
bool_and
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
-- make sure that both of the nodes have the function because we've succeeded
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_with_param_names';$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,1)
|
||||
(localhost,57638,t,1)
|
||||
|
@ -583,16 +583,16 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='add_
|
|||
|
||||
-- valid distribution with distribution_arg_name -- case insensitive
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', distribution_arg_name:='VaL1');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- valid distribution with distribution_arg_index
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)','$1');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- a function cannot be colocated with a table that is not "streaming" replicated
|
||||
|
@ -600,9 +600,9 @@ SET citus.shard_replication_factor TO 2;
|
|||
CREATE TABLE replicated_table_func_test (a int);
|
||||
SET citus.replication_model TO "statement";
|
||||
SELECT create_distributed_table('replicated_table_func_test', 'a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', '$1', colocate_with:='replicated_table_func_test');
|
||||
|
@ -610,9 +610,9 @@ ERROR: cannot colocate function "add_with_param_names" and table "replicated_ta
|
|||
DETAIL: Citus currently only supports colocating function with distributed tables that are created using streaming replication model.
|
||||
HINT: When distributing tables make sure that citus.replication_model = 'streaming'
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
wait_until_metadata_sync
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- a function can be colocated with a different distribution argument type
|
||||
|
@ -621,20 +621,20 @@ SET citus.shard_replication_factor TO 1;
|
|||
CREATE TABLE replicated_table_func_test_2 (a bigint);
|
||||
SET citus.replication_model TO "streaming";
|
||||
SELECT create_distributed_table('replicated_table_func_test_2', 'a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', colocate_with:='replicated_table_func_test_2');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- colocate_with cannot be used without distribution key
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', colocate_with:='replicated_table_func_test_2');
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
ERROR: cannot distribute the function "add_with_param_names" since the distribution argument is not valid
|
||||
HINT: To provide "colocate_with" option, the distribution argument parameter should also be provided
|
||||
-- a function cannot be colocated with a local table
|
||||
CREATE TABLE replicated_table_func_test_3 (a bigint);
|
||||
|
@ -642,9 +642,9 @@ SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', col
|
|||
ERROR: relation replicated_table_func_test_3 is not distributed
|
||||
-- a function cannot be colocated with a reference table
|
||||
SELECT create_reference_table('replicated_table_func_test_3');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', 'val1', colocate_with:='replicated_table_func_test_3');
|
||||
|
@ -654,15 +654,15 @@ SET citus.shard_replication_factor TO 1;
|
|||
CREATE TABLE replicated_table_func_test_4 (a int);
|
||||
SET citus.replication_model TO "streaming";
|
||||
SELECT create_distributed_table('replicated_table_func_test_4', 'a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', '$1', colocate_with:='replicated_table_func_test_4');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show that the colocationIds are the same
|
||||
|
@ -670,7 +670,7 @@ SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_functi
|
|||
FROM pg_dist_partition, citus.pg_dist_object as objects
|
||||
WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND
|
||||
objects.objid = 'add_with_param_names(int, int)'::regprocedure;
|
||||
table_and_function_colocated
|
||||
table_and_function_colocated
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -678,16 +678,16 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass
|
|||
-- now, re-distributed with the default colocation option, we should still see that the same colocation
|
||||
-- group preserved, because we're using the default shard creation settings
|
||||
SELECT create_distributed_function('add_with_param_names(int, int)', 'val1');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated
|
||||
FROM pg_dist_partition, citus.pg_dist_object as objects
|
||||
WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND
|
||||
objects.objid = 'add_with_param_names(int, int)'::regprocedure;
|
||||
table_and_function_colocated
|
||||
table_and_function_colocated
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -697,31 +697,31 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass
|
|||
-- path, we rely on postgres for implicit coersions, and users for explicit coersions
|
||||
-- to coerce the values
|
||||
SELECT create_distributed_function('add_numeric(numeric, numeric)', '$1', colocate_with:='replicated_table_func_test_4');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated
|
||||
FROM pg_dist_partition, citus.pg_dist_object as objects
|
||||
WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND
|
||||
objects.objid = 'add_numeric(numeric, numeric)'::regprocedure;
|
||||
table_and_function_colocated
|
||||
table_and_function_colocated
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('add_text(text, text)', '$1', colocate_with:='replicated_table_func_test_4');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT pg_dist_partition.colocationid = objects.colocationid as table_and_function_colocated
|
||||
FROM pg_dist_partition, citus.pg_dist_object as objects
|
||||
WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass AND
|
||||
objects.objid = 'add_text(text, text)'::regprocedure;
|
||||
table_and_function_colocated
|
||||
table_and_function_colocated
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -738,18 +738,18 @@ ERROR: cannot distribute the function "add_with_param_names" since there is no
|
|||
HINT: Provide a distributed table via "colocate_with" option to create_distributed_function()
|
||||
-- sync metadata to workers for consistent results when clearing objects
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
wait_until_metadata_sync
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.shard_count TO 4;
|
||||
CREATE TABLE test (id int, name text);
|
||||
SELECT create_distributed_table('test','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test VALUES (3,'three');
|
||||
|
@ -762,9 +762,9 @@ BEGIN
|
|||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
SELECT create_distributed_function('increment(int)', '$1', colocate_with := 'test');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- call a distributed function inside a pl/pgsql function
|
||||
|
@ -777,24 +777,24 @@ BEGIN
|
|||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
SELECT test_func_calls_dist_func();
|
||||
test_func_calls_dist_func
|
||||
test_func_calls_dist_func
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT test_func_calls_dist_func();
|
||||
test_func_calls_dist_func
|
||||
test_func_calls_dist_func
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- test an INSERT..SELECT via the coordinator just because it is kind of funky
|
||||
INSERT INTO test SELECT increment(3);
|
||||
SELECT * FROM test ORDER BY id;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
3 | three
|
||||
4 |
|
||||
4 |
|
||||
(2 rows)
|
||||
|
||||
DROP TABLE test;
|
||||
|
@ -803,10 +803,10 @@ DROP SCHEMA function_tests CASCADE;
|
|||
DROP SCHEMA function_tests2 CASCADE;
|
||||
-- clear objects
|
||||
SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary';
|
||||
stop_metadata_sync_to_node
|
||||
stop_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
|
||||
|
||||
(2 rows)
|
||||
|
||||
-- This is hacky, but we should clean-up the resources as below
|
||||
|
@ -826,7 +826,7 @@ DROP SCHEMA function_tests2 CASCADE;
|
|||
\c - - - :master_port
|
||||
DROP USER functionuser;
|
||||
SELECT run_command_on_workers($$DROP USER functionuser$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP ROLE")
|
||||
(localhost,57638,t,"DROP ROLE")

@@ -2,7 +2,7 @@
-- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur
CREATE SCHEMA proc_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
@ -31,9 +31,9 @@ CREATE AGGREGATE existing_agg(int) (
|
|||
STYPE = int
|
||||
);
|
||||
SELECT create_distributed_function('existing_agg(int)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
|
@ -44,7 +44,7 @@ WITH data (val) AS (
|
|||
union all select 6
|
||||
)
|
||||
SELECT existing_agg(val) FROM data;
|
||||
existing_agg
|
||||
existing_agg
|
||||
---------------------------------------------------------------------
|
||||
78
|
||||
(1 row)
|
||||
|
@ -57,7 +57,7 @@ WITH data (val) AS (
|
|||
union all select 6
|
||||
)
|
||||
SELECT existing_agg(val) FROM data;
|
||||
existing_agg
|
||||
existing_agg
|
||||
---------------------------------------------------------------------
|
||||
78
|
||||
(1 row)
|
||||
|
@ -90,9 +90,9 @@ CREATE AGGREGATE existing_agg(int) (
|
|||
STYPE = int
|
||||
);
|
||||
SELECT create_distributed_function('existing_agg(int)');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\c - - - :worker_1_port
|
||||
|
@ -103,7 +103,7 @@ WITH data (val) AS (
|
|||
union all select 6
|
||||
)
|
||||
SELECT existing_agg(val) FROM data;
|
||||
existing_agg
|
||||
existing_agg
|
||||
---------------------------------------------------------------------
|
||||
76
|
||||
(1 row)
|
||||
|
@ -116,7 +116,7 @@ WITH data (val) AS (
|
|||
union all select 6
|
||||
)
|
||||
SELECT existing_agg(val) FROM data;
|
||||
existing_agg
|
||||
existing_agg
|
||||
---------------------------------------------------------------------
|
||||
76
|
||||
(1 row)
|
||||
|
@ -128,13 +128,13 @@ BEGIN
|
|||
END;
|
||||
$$ LANGUAGE plpgsql STRICT IMMUTABLE;
|
||||
SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT worker_create_or_replace_object('CREATE AGGREGATE proc_conflict.existing_agg(integer) (STYPE = integer,SFUNC = proc_conflict.existing_func2)');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)

@@ -3,7 +3,7 @@ CREATE USER procedureuser;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SELECT run_command_on_workers($$CREATE USER procedureuser;$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
@ -24,7 +24,7 @@ $proc$;
|
|||
ALTER SYSTEM SET citus.metadata_sync_interval TO 3000;
|
||||
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 500;
|
||||
SELECT pg_reload_conf();
|
||||
pg_reload_conf
|
||||
pg_reload_conf
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -38,32 +38,32 @@ CREATE TABLE colocation_table(id text);
|
|||
SET citus.replication_model TO 'streaming';
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SELECT create_distributed_table('colocation_table','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_function('raise_info(text)', '$1', colocate_with := 'colocation_table');
|
||||
create_distributed_function
|
||||
create_distributed_function
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT wait_until_metadata_sync();
|
||||
wait_until_metadata_sync
|
||||
wait_until_metadata_sync
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | CALL
|
||||
localhost | 57638 | t | CALL
|
||||
(2 rows)
|
||||
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -73,14 +73,14 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex
|
|||
-- ERROR: ROWS is not applicable when function does not return a set
|
||||
ALTER PROCEDURE raise_info(text) SECURITY INVOKER;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER PROCEDURE raise_info(text) SECURITY DEFINER;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -88,28 +88,28 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex
|
|||
-- Test SET/RESET for alter procedure
|
||||
ALTER PROCEDURE raise_info(text) SET client_min_messages TO warning;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER PROCEDURE raise_info(text) SET client_min_messages TO error;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER PROCEDURE raise_info(text) SET client_min_messages TO debug;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
ALTER PROCEDURE raise_info(text) RESET client_min_messages;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -117,20 +117,20 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(tex
|
|||
-- rename function and make sure the new name can be used on the workers while the old name can't
|
||||
ALTER PROCEDURE raise_info(text) RENAME TO raise_info2;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info2(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
|
||||
localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info2('hello');$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | CALL
|
||||
localhost | 57638 | t | CALL
|
||||
|
@ -140,7 +140,7 @@ ALTER PROCEDURE raise_info2(text) RENAME TO raise_info;
|
|||
-- change the owner of the function and verify the owner has been changed on the workers
|
||||
ALTER PROCEDURE raise_info(text) OWNER TO procedureuser;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -152,7 +152,7 @@ JOIN pg_user ON (usesysid = proowner)
|
|||
JOIN pg_namespace ON (pg_namespace.oid = pronamespace)
|
||||
WHERE proname = 'raise_info';
|
||||
$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(procedureuser,procedure_tests,raise_info)")
|
||||
(localhost,57638,t,"(procedureuser,procedure_tests,raise_info)")
|
||||
|
@ -162,20 +162,20 @@ $$);
|
|||
-- the new schema has the function.
|
||||
ALTER PROCEDURE raise_info(text) SET SCHEMA procedure_tests2;
|
||||
SELECT public.verify_function_is_same_on_workers('procedure_tests2.raise_info(text)');
|
||||
verify_function_is_same_on_workers
|
||||
verify_function_is_same_on_workers
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
|
||||
localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM run_command_on_workers($$CALL procedure_tests2.raise_info('hello');$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | t | CALL
|
||||
localhost | 57638 | t | CALL
|
||||
|
@ -185,7 +185,7 @@ ALTER PROCEDURE procedure_tests2.raise_info(text) SET SCHEMA procedure_tests;
|
|||
DROP PROCEDURE raise_info(text);
|
||||
-- call should fail as procedure should have been dropped
|
||||
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
|
||||
localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
|
||||
|
@ -194,7 +194,7 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');
|
|||
SET client_min_messages TO error; -- suppress cascading objects dropping
|
||||
DROP SCHEMA procedure_tests CASCADE;
|
||||
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP SCHEMA")
|
||||
(localhost,57638,t,"DROP SCHEMA")
|
||||
|
@ -202,7 +202,7 @@ SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$);
|
|||
|
||||
DROP SCHEMA procedure_tests2 CASCADE;
|
||||
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP SCHEMA")
|
||||
(localhost,57638,t,"DROP SCHEMA")
|
||||
|
@ -210,7 +210,7 @@ SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$);
|
|||
|
||||
DROP USER procedureuser;
|
||||
SELECT run_command_on_workers($$DROP USER procedureuser;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP ROLE")
|
||||
(localhost,57638,t,"DROP ROLE")

@@ -3,7 +3,7 @@ CREATE USER typeuser;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SELECT run_command_on_workers($$CREATE USER typeuser;$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
@ -17,14 +17,14 @@ SET citus.shard_count TO 4;
|
|||
CREATE TYPE tc1 AS (a int, b int);
|
||||
CREATE TABLE t1 (a int PRIMARY KEY, b tc1);
|
||||
SELECT create_distributed_table('t1','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t1 VALUES (1, (2,3)::tc1);
|
||||
SELECT * FROM t1;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | (2,3)
|
||||
(1 row)
|
||||
|
@ -37,14 +37,14 @@ INSERT INTO t1 VALUES (6, (7,8)::type_tests2.tc1_newname); -- insert with a cast
|
|||
CREATE TYPE te1 AS ENUM ('one', 'two', 'three');
|
||||
CREATE TABLE t2 (a int PRIMARY KEY, b te1);
|
||||
SELECT create_distributed_table('t2','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t2 VALUES (1, 'two');
|
||||
SELECT * FROM t2;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | two
|
||||
(1 row)
|
||||
|
@ -55,7 +55,7 @@ ALTER TYPE te1 RENAME TO te1_newname;
|
|||
ALTER TYPE te1_newname ADD VALUE 'four';
|
||||
UPDATE t2 SET b = 'four';
|
||||
SELECT * FROM t2;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | four
|
||||
(1 row)
|
||||
|
@ -68,14 +68,14 @@ BEGIN;
|
|||
CREATE TYPE tc2 AS (a int, b int);
|
||||
CREATE TABLE t3 (a int PRIMARY KEY, b tc2);
|
||||
SELECT create_distributed_table('t3','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t3 VALUES (4, (5,6)::tc2);
|
||||
SELECT * FROM t3;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
4 | (5,6)
|
||||
(1 row)
|
||||
|
@ -86,14 +86,14 @@ BEGIN;
|
|||
CREATE TYPE te2 AS ENUM ('yes', 'no');
|
||||
CREATE TABLE t4 (a int PRIMARY KEY, b te2);
|
||||
SELECT create_distributed_table('t4','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t4 VALUES (1, 'yes');
|
||||
SELECT * FROM t4;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | yes
|
||||
(1 row)
|
||||
|
@ -102,13 +102,13 @@ SELECT * FROM t4;
|
|||
COMMIT;
|
||||
-- verify order of enum labels
|
||||
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;
|
||||
string_agg
|
||||
string_agg
|
||||
---------------------------------------------------------------------
|
||||
yes,no
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"yes,no")
|
||||
(localhost,57638,t,"yes,no")
|
||||
|
@ -124,9 +124,9 @@ CREATE TYPE te3 AS ENUM ('a','b');
|
|||
RESET citus.enable_ddl_propagation;
|
||||
CREATE TABLE t5 (a int PRIMARY KEY, b tc5[], c te3);
|
||||
SELECT create_distributed_table('t5','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- test adding an attribute to a type and a column to a table both for a non-distributed type
|
||||
|
@ -144,7 +144,7 @@ INSERT INTO t5 VALUES (1, NULL, 'a', 'd', (1,2,(4,5)::tc6c)::tc6);
|
|||
-- test renaming an attribute of a distrbuted type and read it by its new name to verify propagation
|
||||
ALTER TYPE tc6 RENAME ATTRIBUTE b TO d;
|
||||
SELECT (e::tc6).d FROM t5 ORDER BY 1;
|
||||
d
|
||||
d
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
@ -152,13 +152,13 @@ SELECT (e::tc6).d FROM t5 ORDER BY 1;
|
|||
-- change owner of supported types and check ownership on remote server
|
||||
ALTER TYPE te4 OWNER TO typeuser;
|
||||
SELECT typname, usename FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;
|
||||
typname | usename
|
||||
typname | usename
|
||||
---------------------------------------------------------------------
|
||||
te4 | typeuser
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(te4,typeuser)")
|
||||
(localhost,57638,t,"(te4,typeuser)")
|
||||
|
@ -166,13 +166,13 @@ SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_us
|
|||
|
||||
ALTER TYPE tc6 OWNER TO typeuser;
|
||||
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;
|
||||
typname | usename
|
||||
typname | usename
|
||||
---------------------------------------------------------------------
|
||||
tc6 | typeuser
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(tc6,typeuser)")
|
||||
(localhost,57638,t,"(tc6,typeuser)")
|
||||
|
@ -190,60 +190,60 @@ CREATE TYPE te6 AS ENUM ('a','b','c');
|
|||
RESET citus.enable_ddl_propagation;
|
||||
CREATE TABLE t6 (a int, b tc8, c te6);
|
||||
SELECT create_distributed_table('t6', 'a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET ROLE;
|
||||
-- test ownership of all types
|
||||
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;
|
||||
typname | usename
|
||||
typname | usename
|
||||
---------------------------------------------------------------------
|
||||
tc7 | typeuser
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(tc7,typeuser)")
|
||||
(localhost,57638,t,"(tc7,typeuser)")
|
||||
(2 rows)
|
||||
|
||||
SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;
|
||||
typname | usename
|
||||
typname | usename
|
||||
---------------------------------------------------------------------
|
||||
te5 | typeuser
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(te5,typeuser)")
|
||||
(localhost,57638,t,"(te5,typeuser)")
|
||||
(2 rows)
|
||||
|
||||
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;
|
||||
typname | usename
|
||||
typname | usename
|
||||
---------------------------------------------------------------------
|
||||
tc8 | typeuser
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(tc8,typeuser)")
|
||||
(localhost,57638,t,"(tc8,typeuser)")
|
||||
(2 rows)
|
||||
|
||||
SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;
|
||||
typname | usename
|
||||
typname | usename
|
||||
---------------------------------------------------------------------
|
||||
te6 | typeuser
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"(te6,typeuser)")
|
||||
(localhost,57638,t,"(te6,typeuser)")
|
||||
|
@ -257,12 +257,12 @@ DROP TYPE tc3, tc4, tc5 CASCADE;
|
|||
NOTICE: drop cascades to column b of table t5
|
||||
-- test if the types are deleted
|
||||
SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;
|
||||
typname
|
||||
typname
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"")
|
||||
(localhost,57638,t,"")
|
||||
|
@ -301,9 +301,9 @@ CREATE TYPE distributed_enum_type AS ENUM ('a', 'c');
|
|||
-- enforce distribution of types in every case
|
||||
CREATE TABLE type_proc (a int, b distributed_composite_type, c distributed_enum_type);
|
||||
SELECT create_distributed_table('type_proc','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE type_proc;
|
||||
|
@ -330,13 +330,13 @@ CREATE TYPE feature_flag_composite_type AS (a int, b int);
|
|||
CREATE TYPE feature_flag_enum_type AS ENUM ('a', 'b');
|
||||
-- verify types do not exist on workers
|
||||
SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,0)
|
||||
(localhost,57638,t,0)
|
||||
|
@ -345,19 +345,19 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('
|
|||
-- verify they are still distributed when required
|
||||
CREATE TABLE feature_flag_table (a int PRIMARY KEY, b feature_flag_composite_type, c feature_flag_enum_type);
|
||||
SELECT create_distributed_table('feature_flag_table','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,2)
|
||||
(localhost,57638,t,2)
|
||||
|
@ -368,7 +368,7 @@ RESET citus.enable_create_type_propagation;
|
|||
SET client_min_messages TO error; -- suppress cascading objects dropping
|
||||
DROP SCHEMA type_tests CASCADE;
|
||||
SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP SCHEMA")
|
||||
(localhost,57638,t,"DROP SCHEMA")
|
||||
|
@ -376,7 +376,7 @@ SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
|
|||
|
||||
DROP SCHEMA type_tests2 CASCADE;
|
||||
SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP SCHEMA")
|
||||
(localhost,57638,t,"DROP SCHEMA")
|
||||
|
@ -384,7 +384,7 @@ SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
|
|||
|
||||
DROP USER typeuser;
|
||||
SELECT run_command_on_workers($$DROP USER typeuser;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP ROLE")
|
||||
(localhost,57638,t,"DROP ROLE")

@@ -1,7 +1,7 @@
SET citus.next_shard_id TO 20020000;
CREATE SCHEMA type_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
@ -33,14 +33,14 @@ SET search_path TO type_conflict;
|
|||
WHERE pg_class.relname = 'local_table'
|
||||
AND attnum > 0
|
||||
ORDER BY attnum;
|
||||
relname | attname | typname
|
||||
relname | attname | typname
|
||||
---------------------------------------------------------------------
|
||||
local_table | a | int4
|
||||
local_table | b | my_precious_type(citus_backup_0)
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM local_table;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
42 | ("always bring a towel",t)
|
||||
(1 row)
|
||||
|
@ -49,37 +49,37 @@ SELECT * FROM local_table;
|
|||
SET search_path TO type_conflict;
|
||||
-- make sure worker_create_or_replace correctly generates new names while types are existing
|
||||
SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int);');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int);');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type AS (a int, b int, c int, d int);');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int);');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int);');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
SELECT worker_create_or_replace_object('CREATE TYPE type_conflict.multi_conflicting_type_with_a_really_long_name_that_truncates AS (a int, b int, c int, d int);');
|
||||
worker_create_or_replace_object
|
||||
worker_create_or_replace_object
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -93,7 +93,7 @@ FROM pg_attribute
|
|||
JOIN pg_type AS atttype ON (atttypid = atttype.oid)
|
||||
WHERE pg_type.typname LIKE 'multi_conflicting_type%'
|
||||
GROUP BY pg_type.typname;
|
||||
typname | fields
|
||||
typname | fields
|
||||
---------------------------------------------------------------------
|
||||
multi_conflicting_type | a int4, b int4, c int4, d int4
|
||||
multi_conflicting_type(citus_backup_0) | a int4, b int4

@@ -1,6 +1,6 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
version_above_eleven
---------------------------------------------------------------------
t
(1 row)
@ -14,14 +14,14 @@ BEGIN;
|
|||
CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no');
|
||||
CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit);
|
||||
SELECT create_distributed_table('t1','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t1 VALUES (1, 'yes');
|
||||
SELECT * FROM t1;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | yes
|
||||
(1 row)
|
||||
|
@ -32,13 +32,13 @@ ALTER TYPE xact_enum_edit ADD VALUE 'maybe';
|
|||
ABORT;
|
||||
-- maybe should not be on the workers
|
||||
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
|
||||
string_agg
|
||||
string_agg
|
||||
---------------------------------------------------------------------
|
||||
yes,no
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"yes,no")
|
||||
(localhost,57638,t,"yes,no")
|
||||
|
@ -49,13 +49,13 @@ ALTER TYPE xact_enum_edit ADD VALUE 'maybe';
|
|||
COMMIT;
|
||||
-- maybe should be on the workers (pg12 and above)
|
||||
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
|
||||
string_agg
|
||||
string_agg
|
||||
---------------------------------------------------------------------
|
||||
yes,no,maybe
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"yes,no,maybe")
|
||||
(localhost,57638,t,"yes,no,maybe")
|
||||
|
@ -65,7 +65,7 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso
|
|||
SET client_min_messages TO error; -- suppress cascading objects dropping
|
||||
DROP SCHEMA xact_enum_type CASCADE;
|
||||
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP SCHEMA")
|
||||
(localhost,57638,t,"DROP SCHEMA")

@@ -1,6 +1,6 @@
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 11 AS version_above_eleven;
version_above_eleven
version_above_eleven
---------------------------------------------------------------------
f
(1 row)
@ -14,14 +14,14 @@ BEGIN;
|
|||
CREATE TYPE xact_enum_edit AS ENUM ('yes', 'no');
|
||||
CREATE TABLE t1 (a int PRIMARY KEY, b xact_enum_edit);
|
||||
SELECT create_distributed_table('t1','a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t1 VALUES (1, 'yes');
|
||||
SELECT * FROM t1;
|
||||
a | b
|
||||
a | b
|
||||
---------------------------------------------------------------------
|
||||
1 | yes
|
||||
(1 row)
|
||||
|
@ -33,13 +33,13 @@ ERROR: ALTER TYPE ... ADD cannot run inside a transaction block
|
|||
ABORT;
|
||||
-- maybe should not be on the workers
|
||||
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
|
||||
string_agg
|
||||
string_agg
|
||||
---------------------------------------------------------------------
|
||||
yes,no
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"yes,no")
|
||||
(localhost,57638,t,"yes,no")
|
||||
|
@ -51,13 +51,13 @@ ERROR: ALTER TYPE ... ADD cannot run inside a transaction block
|
|||
COMMIT;
|
||||
-- maybe should be on the workers (pg12 and above)
|
||||
SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;
|
||||
string_agg
|
||||
string_agg
|
||||
---------------------------------------------------------------------
|
||||
yes,no
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"yes,no")
|
||||
(localhost,57638,t,"yes,no")
|
||||
|
@ -67,7 +67,7 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso
|
|||
SET client_min_messages TO error; -- suppress cascading objects dropping
|
||||
DROP SCHEMA xact_enum_type CASCADE;
|
||||
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"DROP SCHEMA")
|
||||
(localhost,57638,t,"DROP SCHEMA")
|
||||
|
|
|
@@ -3,23 +3,23 @@ SET search_path TO recursive_dml_queries, public;
SET citus.next_shard_id TO 2370000;
CREATE TABLE recursive_dml_queries.distributed_table (tenant_id text, dept int, info jsonb);
SELECT create_distributed_table('distributed_table', 'tenant_id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE recursive_dml_queries.second_distributed_table (tenant_id text, dept int, info jsonb);
SELECT create_distributed_table('second_distributed_table', 'tenant_id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE recursive_dml_queries.reference_table (id text, name text);
SELECT create_reference_table('reference_table');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE recursive_dml_queries.local_table (id text, name text);
@@ -54,7 +54,7 @@ RETURNING
reference_table.name;
DEBUG: generating subplan 4_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM recursive_dml_queries.second_distributed_table
DEBUG: Plan 4 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.reference_table SET name = ('new_'::text OPERATOR(pg_catalog.||) reference_table.name) FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('4_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) reference_table.id) RETURNING reference_table.name
name
name
---------------------------------------------------------------------
new_user_50
(1 row)
@@ -87,7 +87,7 @@ RETURNING
second_distributed_table.tenant_id, second_distributed_table.dept;
DEBUG: generating subplan 6_1 for subquery SELECT DISTINCT ON (tenant_id) tenant_id, max(dept) AS max_dept FROM (SELECT second_distributed_table.dept, second_distributed_table.tenant_id FROM recursive_dml_queries.second_distributed_table, recursive_dml_queries.distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) second_distributed_table.tenant_id)) foo_inner GROUP BY tenant_id ORDER BY tenant_id DESC
DEBUG: Plan 6 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.second_distributed_table SET dept = (foo.max_dept OPERATOR(pg_catalog.*) 2) FROM (SELECT intermediate_result.tenant_id, intermediate_result.max_dept FROM read_intermediate_result('6_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text, max_dept integer)) foo WHERE ((foo.tenant_id OPERATOR(pg_catalog.<>) second_distributed_table.tenant_id) AND (second_distributed_table.dept OPERATOR(pg_catalog.=) 2)) RETURNING second_distributed_table.tenant_id, second_distributed_table.dept
tenant_id | dept
tenant_id | dept
---------------------------------------------------------------------
12 | 18
2 | 18
@@ -156,7 +156,7 @@ RETURNING
distributed_table.*;
DEBUG: generating subplan 11_1 for subquery SELECT avg((id)::integer) AS avg_tenant_id FROM recursive_dml_queries.local_table
DEBUG: Plan 11 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('11_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info
tenant_id | dept | info
tenant_id | dept | info
---------------------------------------------------------------------
50 | 50 | {"f1": 50, "f2": 2500}
(1 row)
@@ -179,7 +179,7 @@ RETURNING
distributed_table.*;
DEBUG: generating subplan 12_1 for subquery SELECT avg((tenant_id)::integer) AS avg_tenant_id FROM (SELECT distributed_table.tenant_id, reference_table.name FROM recursive_dml_queries.distributed_table, recursive_dml_queries.reference_table WHERE ((distributed_table.dept)::text OPERATOR(pg_catalog.=) reference_table.id) ORDER BY reference_table.name DESC, distributed_table.tenant_id DESC) tenant_ids
DEBUG: Plan 12 query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.distributed_table SET dept = (foo.avg_tenant_id)::integer FROM (SELECT intermediate_result.avg_tenant_id FROM read_intermediate_result('12_1'::text, 'binary'::citus_copy_format) intermediate_result(avg_tenant_id numeric)) foo WHERE (((foo.avg_tenant_id)::integer)::text OPERATOR(pg_catalog.=) distributed_table.tenant_id) RETURNING distributed_table.tenant_id, distributed_table.dept, distributed_table.info
tenant_id | dept | info
tenant_id | dept | info
---------------------------------------------------------------------
50 | 50 | {"f1": 50, "f2": 2500}
(1 row)
@@ -212,7 +212,7 @@ foo_inner_1 JOIN LATERAL
) foo_inner_2
ON (foo_inner_2.tenant_id != foo_inner_1.tenant_id)
ORDER BY foo_inner_1.tenant_id;
tenant_id
tenant_id
---------------------------------------------------------------------
14
24

@@ -1,18 +1,18 @@
---------------------------------------------------------------------
-- THIS TEST SHOULD IDEALLY BE EXECUTED AT THE END OF
-- THE REGRESSION TEST SUITE TO MAKE SURE THAT WE
-- CLEAR ALL INTERMEDIATE RESULTS ON BOTH THE COORDINATOR
-- THIS TEST SHOULD IDEALLY BE EXECUTED AT THE END OF
-- THE REGRESSION TEST SUITE TO MAKE SURE THAT WE
-- CLEAR ALL INTERMEDIATE RESULTS ON BOTH THE COORDINATOR
-- AND ON THE WORKERS. HOWEVER, WE HAVE SOME ISSUES AROUND
-- WINDOWS SUPPORT, FAILURES IN TASK-TRACKER EXECUTOR
-- SO WE DISABLE THIS TEST ON WINDOWS
---------------------------------------------------------------------
SELECT pg_ls_dir('base/pgsql_job_cache') WHERE citus_version() NOT ILIKE '%windows%';
pg_ls_dir
pg_ls_dir
---------------------------------------------------------------------
(0 rows)

SELECT run_command_on_workers($$SELECT pg_ls_dir('base/pgsql_job_cache') WHERE citus_version() NOT ILIKE '%windows%'$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")

@@ -14,7 +14,7 @@ WHERE name = 'uuid-ossp'
:uuid_present_command;
-- show that the extension is created on both nodes
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
@@ -25,7 +25,7 @@ DROP EXTENSION "uuid-ossp";
RESET client_min_messages;
-- show that the extension is dropped from both nodes
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
@@ -33,7 +33,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname

-- show that extension recreation on new nodes works also fine with extension names that require escaping
SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
?column?
---------------------------------------------------------------------
1
(1 row)
@@ -50,14 +50,14 @@ WHERE name = 'uuid-ossp'
:uuid_present_command;
-- and add the other node
SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
?column?
---------------------------------------------------------------------
1
(1 row)

-- show that the extension exists on both nodes
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)

@@ -12,14 +12,14 @@ FROM pg_available_extensions()
WHERE name = 'uuid-ossp'
\gset
:uuid_present_command;
uuid_ossp_present
uuid_ossp_present
---------------------------------------------------------------------
f
(1 row)

-- show that the extension is created on both nodes
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
@@ -31,7 +31,7 @@ ERROR: extension "uuid-ossp" does not exist
RESET client_min_messages;
-- show that the extension is dropped from both nodes
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
@@ -39,7 +39,7 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname

-- show that extension recreation on new nodes works also fine with extension names that require escaping
SELECT 1 from master_remove_node('localhost', :worker_2_port);
?column?
?column?
---------------------------------------------------------------------
1
(1 row)
@@ -54,21 +54,21 @@ FROM pg_available_extensions()
WHERE name = 'uuid-ossp'
\gset
:uuid_present_command;
uuid_ossp_present
uuid_ossp_present
---------------------------------------------------------------------
f
(1 row)

-- and add the other node
SELECT 1 from master_add_node('localhost', :worker_2_port);
?column?
?column?
---------------------------------------------------------------------
1
(1 row)

-- show that the extension exists on both nodes
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)

@@ -13,16 +13,16 @@ INSERT INTO test VALUES
(2,2);
SELECT create_reference_table('ref');
NOTICE: Copying data from local table...
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test', 'x');
NOTICE: Copying data from local table...
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

-- PR 3180 implements expressions in join clauses to reference tables to support CHbenCHmark queries 7/8/9
@@ -33,7 +33,7 @@ FROM
ref a
WHERE t2.y * 2 = a.a
ORDER BY 1,2,3;
y | x | x | a | b
y | x | x | a | b
---------------------------------------------------------------------
2 | 1 | 1 | 4 | 4
2 | 1 | 2 | 4 | 4
@@ -53,7 +53,7 @@ FROM
ref b
WHERE t2.y - a.a - b.b = 0
ORDER BY 1,2,3;
y | x | x | a | b | a | b
y | x | x | a | b | a | b
---------------------------------------------------------------------
(0 rows)

@ -1,7 +1,7 @@
|
|||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- do not cache any connections
|
||||
|
@ -13,26 +13,26 @@ SET citus.next_shard_id TO 100400;
|
|||
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100;
|
||||
CREATE TABLE copy_test (key int, value int);
|
||||
SELECT create_distributed_table('copy_test', 'key', 'append');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.clear_network_traffic();
|
||||
clear_network_traffic
|
||||
clear_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT citus.dump_network_traffic();
|
||||
dump_network_traffic
|
||||
dump_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
(0,coordinator,"[initial message]")
|
||||
(0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
|
||||
|
@ -58,16 +58,16 @@ SELECT citus.dump_network_traffic();
|
|||
|
||||
---- all of the following tests test behavior with 2 shard placements ----
|
||||
SHOW citus.shard_replication_factor;
|
||||
citus.shard_replication_factor
|
||||
citus.shard_replication_factor
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
---- kill the connection when we try to create the shard ----
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -78,23 +78,23 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
(2 rows)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
---- kill the connection when we try to start a transaction ----
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -104,23 +104,23 @@ ERROR: failure on connection marked as essential: localhost:xxxxx
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
(2 rows)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
---- kill the connection when we start the COPY ----
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -131,23 +131,23 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
(2 rows)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
---- kill the connection when we send the data ----
|
||||
SELECT citus.mitmproxy('conn.onCopyData().kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -155,16 +155,16 @@ ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
(2 rows)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
|
@ -172,16 +172,16 @@ WARNING: connection error: localhost:xxxxx
|
|||
DETAIL: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
---- cancel the connection when we send the data ----
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -189,7 +189,7 @@ ERROR: canceling statement due to user request
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
|
@ -199,9 +199,9 @@ SELECT count(1) FROM copy_test;
|
|||
ERROR: canceling statement due to user request
|
||||
---- kill the connection when we try to get the size of the table ----
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="pg_table_size").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -215,23 +215,23 @@ ERROR: failure on connection marked as essential: localhost:xxxxx
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
(2 rows)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
---- kill the connection when we try to get the min, max of the table ----
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT min\(key\), max\(key\)").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -245,23 +245,23 @@ ERROR: failure on connection marked as essential: localhost:xxxxx
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
(2 rows)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
---- kill the connection when we try to COMMIT ----
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -273,7 +273,7 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
||||
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
|
||||
ORDER BY placementid;
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
|
||||
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
|
||||
|
@ -282,16 +282,16 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
|
|||
(4 rows)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
-- ==== Clean up, we're done here ====
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE copy_test;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- do not cache any connections
|
||||
|
@ -14,26 +14,26 @@ SET citus.max_cached_conns_per_worker TO 0;
|
|||
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100;
|
||||
CREATE TABLE copy_test (key int, value int);
|
||||
SELECT create_distributed_table('copy_test', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.clear_network_traffic();
|
||||
clear_network_traffic
|
||||
clear_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT citus.dump_network_traffic();
|
||||
dump_network_traffic
|
||||
dump_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
(0,coordinator,"[initial message]")
|
||||
(0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
|
||||
|
@ -54,9 +54,9 @@ SELECT citus.dump_network_traffic();
|
|||
-- ==== kill the connection when we try to start a transaction ====
|
||||
-- the query should abort
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -68,9 +68,9 @@ CONTEXT: COPY copy_test, line 1: "0, 0"
|
|||
-- ==== kill the connection when we try to start the COPY ====
|
||||
-- the query should abort
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -82,9 +82,9 @@ COPY copy_test, line 1: "0, 0"
|
|||
-- ==== kill the connection when we first start sending data ====
|
||||
-- the query should abort
|
||||
SELECT citus.mitmproxy('conn.onCopyData().killall()'); -- raw rows from the client
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -92,9 +92,9 @@ ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
|||
-- ==== kill the connection when the worker confirms it's received the data ====
|
||||
-- the query should abort
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -102,29 +102,29 @@ ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
|||
-- ==== kill the connection when we try to send COMMIT ====
|
||||
-- the query should succeed, and the placement should be marked inactive
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass
|
||||
) AND shardstate = 3;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -135,21 +135,21 @@ WARNING: connection not open
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- the shard is marked invalid
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(1) FROM pg_dist_shard_placement WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass
|
||||
) AND shardstate = 3;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT count(1) FROM copy_test;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
8
|
||||
(1 row)
|
||||
|
@ -169,24 +169,24 @@ ERROR: missing data for column "value"
|
|||
CONTEXT: COPY copy_test, line 5: "10"
|
||||
-- kill the connection if the coordinator sends COMMIT. It doesn't, so nothing changes
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9 && echo 10' WITH CSV;
|
||||
ERROR: missing data for column "value"
|
||||
CONTEXT: COPY copy_test, line 5: "10"
|
||||
SELECT * FROM copy_test ORDER BY key, value;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
-- ==== clean up some more to prepare for tests with only one replica ====
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
TRUNCATE copy_test;
|
||||
|
@ -194,7 +194,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_1_por
|
|||
SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass
|
||||
) ORDER BY nodeport, placementid;
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
100400 | 1 | 0 | localhost | 9060 | 100
|
||||
100400 | 3 | 0 | localhost | 57637 | 101
|
||||
|
@ -203,7 +203,7 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
|
|||
-- ==== okay, run some tests where there's only one active shard ====
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
SELECT * FROM copy_test;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
0 | 0
|
||||
1 | 1
|
||||
|
@ -213,9 +213,9 @@ SELECT * FROM copy_test;
|
|||
|
||||
-- the worker is unreachable
|
||||
SELECT citus.mitmproxy('conn.killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -227,13 +227,13 @@ CONTEXT: COPY copy_test, line 1: "0, 0"
|
|||
ERROR: could not connect to any active placements
|
||||
CONTEXT: COPY copy_test, line 1: "0, 0"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM copy_test;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
0 | 0
|
||||
1 | 1
|
||||
|
@ -243,9 +243,9 @@ SELECT * FROM copy_test;
|
|||
|
||||
-- the first message fails
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -255,13 +255,13 @@ COPY copy_test, line 1: "0, 0"
|
|||
ERROR: failure on connection marked as essential: localhost:xxxxx
|
||||
CONTEXT: COPY copy_test, line 1: "0, 0"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM copy_test;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
0 | 0
|
||||
1 | 1
|
||||
|
@ -271,9 +271,9 @@ SELECT * FROM copy_test;
|
|||
|
||||
-- the COPY message fails
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -283,13 +283,13 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
COPY copy_test, line 1: "0, 0"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM copy_test;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
0 | 0
|
||||
1 | 1
|
||||
|
@ -299,21 +299,21 @@ SELECT * FROM copy_test;
|
|||
|
||||
-- the COPY data fails
|
||||
SELECT citus.mitmproxy('conn.onCopyData().killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM copy_test;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
0 | 0
|
||||
1 | 1
|
||||
|
@ -323,9 +323,9 @@ SELECT * FROM copy_test;
|
|||
|
||||
-- the COMMIT fails
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -337,13 +337,13 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
WARNING: could not commit transaction for shard xxxxx on any active node
|
||||
ERROR: could not commit transaction on any active node
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM copy_test;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
0 | 0
|
||||
1 | 1
|
||||
|
@ -355,7 +355,7 @@ SELECT * FROM copy_test;
|
|||
SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass
|
||||
) ORDER BY nodeport, placementid;
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
100400 | 1 | 0 | localhost | 9060 | 100
|
||||
100400 | 3 | 0 | localhost | 57637 | 101
|
||||
|
@ -363,9 +363,9 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
|
|||
|
||||
-- the COMMIT makes it through but the connection dies before we get a response
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
|
||||
|
@ -377,22 +377,22 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
WARNING: could not commit transaction for shard xxxxx on any active node
|
||||
ERROR: could not commit transaction on any active node
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'copy_test'::regclass
|
||||
) ORDER BY nodeport, placementid;
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
100400 | 1 | 0 | localhost | 9060 | 100
|
||||
100400 | 3 | 0 | localhost | 57637 | 101
|
||||
(2 rows)
|
||||
|
||||
SELECT * FROM copy_test;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
0 | 0
|
||||
1 | 1
|
||||
|
@ -406,9 +406,9 @@ SELECT * FROM copy_test;
|
|||
|
||||
-- ==== Clean up, we're done here ====
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE copy_test;
|
||||
|
|
|
@ -5,16 +5,16 @@
|
|||
-- tested as they don't create network activity
|
||||
--
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.next_shard_id TO 200000;
|
||||
-- verify we have all worker nodes present
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 9060
|
||||
localhost | 57637
|
||||
|
@ -22,7 +22,7 @@ ORDER BY 1, 2;
|
|||
|
||||
-- verify there are no tables that could prevent add/remove node operations
|
||||
SELECT * FROM pg_dist_partition;
|
||||
logicalrelid | partmethod | partkey | colocationid | repmodel
|
||||
logicalrelid | partmethod | partkey | colocationid | repmodel
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -30,23 +30,23 @@ CREATE SCHEMA add_remove_node;
|
|||
SET SEARCH_PATH=add_remove_node;
|
||||
CREATE TABLE user_table(user_id int, user_name text);
|
||||
SELECT create_reference_table('user_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE event_table(user_id int, event_id int, event_name text);
|
||||
SELECT create_distributed_table('event_table', 'user_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT shardid, shardstate
|
||||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
200000 | 1
|
||||
|
@ -54,14 +54,14 @@ ORDER BY placementid;
|
|||
|
||||
SELECT master_disable_node('localhost', :worker_2_proxy_port);
|
||||
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back.
|
||||
master_disable_node
|
||||
master_disable_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637
|
||||
(1 row)
|
||||
|
@ -70,16 +70,16 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
||||
-- fail activate node by failing reference table creation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_activate_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -89,15 +89,15 @@ ERROR: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- verify node is not activated
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637
|
||||
(1 row)
|
||||
|
@ -106,16 +106,16 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
||||
-- fail create schema command
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_activate_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -126,7 +126,7 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
-- verify node is not activated
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637
|
||||
(1 row)
|
||||
|
@ -135,16 +135,16 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
||||
-- fail activate node by failing reference table creation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_activate_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -153,7 +153,7 @@ ERROR: canceling statement due to user request
|
|||
-- verify node is not activated
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637
|
||||
(1 row)
|
||||
|
@ -162,15 +162,15 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- master_remove_node fails when there are shards on that worker
|
||||
|
@ -179,15 +179,15 @@ ERROR: you cannot remove the primary node of a node group which has shard place
|
|||
-- drop event table and re-run remove
|
||||
DROP TABLE event_table;
|
||||
SELECT master_remove_node('localhost', :worker_2_proxy_port);
|
||||
master_remove_node
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- verify node is removed
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637
|
||||
(1 row)
|
||||
|
@ -196,7 +196,7 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
@ -205,22 +205,22 @@ ORDER BY placementid;
|
|||
-- it does not create any network activity therefore can not
|
||||
-- be injected failure through network
|
||||
SELECT master_add_inactive_node('localhost', :worker_2_proxy_port);
|
||||
master_add_inactive_node
|
||||
master_add_inactive_node
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT master_remove_node('localhost', :worker_2_proxy_port);
|
||||
master_remove_node
|
||||
master_remove_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT shardid, shardstate
|
||||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
@ -228,9 +228,9 @@ ORDER BY placementid;
|
|||
-- test master_add_node replicated a reference table
|
||||
-- to newly added node.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_add_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -242,7 +242,7 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
-- verify node is not added
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637
|
||||
(1 row)
|
||||
|
@ -251,15 +251,15 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_add_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -268,7 +268,7 @@ ERROR: canceling statement due to user request
|
|||
-- verify node is not added
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 57637
|
||||
(1 row)
|
||||
|
@ -277,21 +277,21 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
(1 row)
|
||||
|
||||
-- reset cluster to original state
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_add_node('localhost', :worker_2_proxy_port);
|
||||
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
|
||||
master_add_node
|
||||
master_add_node
|
||||
---------------------------------------------------------------------
|
||||
6
|
||||
(1 row)
|
||||
|
@ -299,7 +299,7 @@ NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
|
|||
-- verify node is added
|
||||
SELECT * FROM master_get_active_worker_nodes()
|
||||
ORDER BY 1, 2;
|
||||
node_name | node_port
|
||||
node_name | node_port
|
||||
---------------------------------------------------------------------
|
||||
localhost | 9060
|
||||
localhost | 57637
|
||||
|
@ -309,7 +309,7 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
|
||||
WHERE s.logicalrelid = 'user_table'::regclass
|
||||
ORDER BY placementid;
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
200000 | 1
|
||||
200000 | 1
|
||||
|
@ -317,15 +317,15 @@ ORDER BY placementid;
|
|||

-- fail master_add_node by failing copy out operation
SELECT master_remove_node('localhost', :worker_1_port);
master_remove_node
master_remove_node
---------------------------------------------------------------------


(1 row)

SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SELECT master_add_node('localhost', :worker_1_port);
|
@ -335,20 +335,20 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
-- verify node is not added
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
node_name | node_port
---------------------------------------------------------------------
localhost | 9060
(1 row)

SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SELECT master_add_node('localhost', :worker_1_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
master_add_node
master_add_node
---------------------------------------------------------------------
8
(1 row)
|
@ -356,7 +356,7 @@ NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
|
|||
-- verify node is added
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
node_name | node_port
---------------------------------------------------------------------
localhost | 9060
localhost | 57637
|
@ -366,7 +366,7 @@ SELECT shardid, shardstate
|
|||
FROM pg_dist_placement p JOIN pg_dist_shard s USING (shardid)
WHERE s.logicalrelid = 'user_table'::regclass
ORDER BY placementid;
shardid | shardstate
shardid | shardstate
---------------------------------------------------------------------
200000 | 1
200000 | 1
|
@ -377,7 +377,7 @@ DROP SCHEMA add_remove_node CASCADE;
|
|||
NOTICE: drop cascades to table add_remove_node.user_table
SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE')
ORDER BY nodeport;
nodename | nodeport | success | result
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 9060 | t | DROP SCHEMA
localhost | 57637 | t | DROP SCHEMA
|
|
|
@ -6,9 +6,9 @@
|
|||
-- - timeout
--
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

CREATE SCHEMA fail_connect;
|
@ -23,9 +23,9 @@ CREATE TABLE products (
|
|||
price numeric
);
SELECT create_distributed_table('products', 'product_no');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

-- Can only add primary key constraint on distribution column (or group of columns
|
@ -38,17 +38,17 @@ DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY const
|
|||
-- into connection establishment problems
SET citus.node_connection_timeout TO 400;
SELECT citus.mitmproxy('conn.delay(500)');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no);
ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

CREATE TABLE r1 (
|
@ -61,26 +61,26 @@ INSERT INTO r1 (id, name) VALUES
|
|||
(3,'baz');
|
||||
SELECT create_reference_table('r1');
|
||||
NOTICE: Copying data from local table...
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.clear_network_traffic();
|
||||
clear_network_traffic
|
||||
clear_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.delay(500)');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- we cannot control which replica of the reference table will be queried and there is
|
||||
-- only one specific client we can control the connection for.
|
||||
-- by using round-robin task_assignment_policy we can force to hit both machines.
|
||||
-- by using round-robin task_assignment_policy we can force to hit both machines.
|
||||
-- and in the end, dumping the network traffic shows that the connection establishment
|
||||
-- is initiated to the node behind the proxy
|
||||
SET client_min_messages TO ERROR;
|
||||
|
@ -88,13 +88,13 @@ SET citus.task_assignment_policy TO 'round-robin';
|
|||
-- suppress the warning since we can't control which shard is chose first. Failure of this
|
||||
-- test would be if one of the queries does not return the result but an error.
|
||||
SELECT name FROM r1 WHERE id = 2;
|
||||
name
|
||||
name
|
||||
---------------------------------------------------------------------
|
||||
bar
|
||||
(1 row)
|
||||
|
||||
SELECT name FROM r1 WHERE id = 2;
|
||||
name
|
||||
name
|
||||
---------------------------------------------------------------------
|
||||
bar
|
||||
(1 row)
|
||||
|
@ -102,70 +102,70 @@ SELECT name FROM r1 WHERE id = 2;
|
|||
-- verify a connection attempt was made to the intercepted node, this would have cause the
|
||||
-- connection to have been delayed and thus caused a timeout
|
||||
SELECT citus.dump_network_traffic();
|
||||
dump_network_traffic
|
||||
dump_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
(0,coordinator,"[initial message]")
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- similar test with the above but this time on a
|
||||
-- similar test with the above but this time on a
|
||||
-- distributed table instead of a reference table
|
||||
-- and with citus.force_max_query_parallelization is set
|
||||
SET citus.force_max_query_parallelization TO ON;
|
||||
SELECT citus.mitmproxy('conn.delay(500)');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- suppress the warning since we can't control which shard is chose first. Failure of this
|
||||
-- test would be if one of the queries does not return the result but an error.
|
||||
SELECT count(*) FROM products;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM products;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- use OFFSET 1 to prevent printing the line where source
|
||||
-- use OFFSET 1 to prevent printing the line where source
|
||||
-- is the worker
|
||||
SELECT citus.dump_network_traffic() ORDER BY 1 OFFSET 1;
|
||||
dump_network_traffic
|
||||
dump_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
(1,coordinator,"[initial message]")
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE single_replicatated(key int);
|
||||
SELECT create_distributed_table('single_replicatated', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- this time the table is single replicated and we're still using the
|
||||
-- the max parallelization flag, so the query should fail
|
||||
SET citus.force_max_query_parallelization TO ON;
|
||||
SELECT citus.mitmproxy('conn.delay(500)');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM single_replicatated;
|
||||
|
@ -175,47 +175,47 @@ SET citus.force_max_query_parallelization TO OFF;
|
|||
-- to see that connection establishement failures could
|
||||
-- mark placement INVALID
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
SELECT
|
||||
SELECT
|
||||
count(*) as invalid_placement_count
|
||||
FROM
|
||||
pg_dist_shard_placement
|
||||
WHERE
|
||||
shardstate = 3 AND
|
||||
FROM
|
||||
pg_dist_shard_placement
|
||||
WHERE
|
||||
shardstate = 3 AND
|
||||
shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass);
|
||||
invalid_placement_count
|
||||
invalid_placement_count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.delay(500)');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO products VALUES (100, '100', 100);
|
||||
COMMIT;
|
||||
SELECT
|
||||
SELECT
|
||||
count(*) as invalid_placement_count
|
||||
FROM
|
||||
pg_dist_shard_placement
|
||||
WHERE
|
||||
shardstate = 3 AND
|
||||
FROM
|
||||
pg_dist_shard_placement
|
||||
WHERE
|
||||
shardstate = 3 AND
|
||||
shardid IN (SELECT shardid from pg_dist_shard where logicalrelid = 'products'::regclass);
|
||||
invalid_placement_count
|
||||
invalid_placement_count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- show that INSERT went through
|
||||
SELECT count(*) FROM products WHERE product_no = 100;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
@ -225,14 +225,14 @@ RESET client_min_messages;
|
|||
SELECT get_global_active_transactions();
|
||||
WARNING: could not establish connection after 400 ms
|
||||
WARNING: connection error: localhost:xxxxx
|
||||
get_global_active_transactions
|
||||
get_global_active_transactions
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.node_connection_timeout TO DEFAULT;
|
||||
|
|
|
@ -5,9 +5,9 @@ CREATE SCHEMA copy_distributed_table;
|
|||
SET search_path TO 'copy_distributed_table';
|
||||
SET citus.next_shard_id TO 1710000;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- With one placement COPY should error out and placement should stay healthy.
|
||||
|
@ -16,23 +16,23 @@ SET citus.shard_count to 4;
|
|||
SET citus.max_cached_conns_per_worker to 0;
|
||||
CREATE TABLE test_table(id int, value_1 int);
|
||||
SELECT create_distributed_table('test_table','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE VIEW unhealthy_shard_count AS
|
||||
SELECT count(*)
|
||||
FROM pg_dist_shard_placement pdsp
|
||||
JOIN
|
||||
pg_dist_shard pds
|
||||
ON pdsp.shardid=pds.shardid
|
||||
CREATE VIEW unhealthy_shard_count AS
|
||||
SELECT count(*)
|
||||
FROM pg_dist_shard_placement pdsp
|
||||
JOIN
|
||||
pg_dist_shard pds
|
||||
ON pdsp.shardid=pds.shardid
|
||||
WHERE logicalrelid='copy_distributed_table.test_table'::regclass AND shardstate != 1;
|
||||
-- Just kill the connection after sending the first query to the worker.
|
||||
SELECT citus.mitmproxy('conn.kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table FROM stdin delimiter ',';
|
||||
|
@ -44,46 +44,46 @@ CONTEXT: COPY test_table, line 1: "1,2"
|
|||
ERROR: could not connect to any active placements
|
||||
CONTEXT: COPY test_table, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Now, kill the connection while copying the data
|
||||
SELECT citus.mitmproxy('conn.onCopyData().kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table FROM stdin delimiter ',';
|
||||
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -91,109 +91,109 @@ SELECT count(*) FROM test_table;
|
|||
-- Similar to the above one, but now cancel the connection
|
||||
-- instead of killing it.
|
||||
SELECT citus.mitmproxy('conn.onCopyData().cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table FROM stdin delimiter ',';
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- kill the connection after worker sends command complete message
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table FROM stdin delimiter ',';
|
||||
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- similar to above one, but cancel the connection on command complete
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table FROM stdin delimiter ',';
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- kill the connection on PREPARE TRANSACTION
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table FROM stdin delimiter ',';
|
||||
ERROR: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -202,27 +202,27 @@ SELECT count(*) FROM test_table;
|
|||
SET client_min_messages TO ERROR;
|
||||
-- kill on command complete on COMMIT PREPARE, command should succeed
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table FROM stdin delimiter ',';
|
||||
SET client_min_messages TO NOTICE;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
@ -230,9 +230,9 @@ SELECT count(*) FROM test_table;
|
|||
TRUNCATE TABLE test_table;
|
||||
-- kill on ROLLBACK, command could be rollbacked
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="ROLLBACK").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -241,19 +241,19 @@ ROLLBACK;
|
|||
WARNING: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -264,15 +264,15 @@ NOTICE: drop cascades to view unhealthy_shard_count
|
|||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE test_table_2(id int, value_1 int);
|
||||
SELECT create_distributed_table('test_table_2','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table_2 FROM stdin delimiter ',';
|
||||
|
@ -297,9 +297,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
CONTEXT: COPY test_table_2, line 5: "9,10"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
||||
|
@ -308,7 +308,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
|||
ON pdsd.shardid = pds.shardid
|
||||
WHERE pds.logicalrelid = 'test_table_2'::regclass
|
||||
ORDER BY shardid, nodeport;
|
||||
logicalrelid | shardid | shardstate
|
||||
logicalrelid | shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
test_table_2 | 1710004 | 3
|
||||
test_table_2 | 1710004 | 1
|
||||
|
@ -324,17 +324,17 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
|||
DROP TABLE test_table_2;
|
||||
CREATE TABLE test_table_2(id int, value_1 int);
|
||||
SELECT create_distributed_table('test_table_2','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Kill the connection when we try to start the COPY
|
||||
-- Kill the connection when we try to start the COPY
|
||||
-- The query should abort
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table_2 FROM stdin delimiter ',';
|
||||
|
@ -344,9 +344,9 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
COPY test_table_2, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
||||
|
@ -355,7 +355,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
|||
ON pdsd.shardid = pds.shardid
|
||||
WHERE pds.logicalrelid = 'test_table_2'::regclass
|
||||
ORDER BY shardid, nodeport;
|
||||
logicalrelid | shardid | shardstate
|
||||
logicalrelid | shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
test_table_2 | 1710008 | 1
|
||||
test_table_2 | 1710008 | 1
|
||||
|
@ -371,26 +371,26 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
|||
DROP TABLE test_table_2;
|
||||
CREATE TABLE test_table_2(id int, value_1 int);
|
||||
SELECT create_distributed_table('test_table_2','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- When kill on copying data, it will be rollbacked and placements won't be labaled as invalid.
|
||||
-- Note that now we sent data to shard xxxxx, yet it is not marked as invalid.
|
||||
-- You can check the issue about this behaviour: https://github.com/citusdata/citus/issues/1933
|
||||
SELECT citus.mitmproxy('conn.onCopyData().kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\COPY test_table_2 FROM stdin delimiter ',';
|
||||
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
||||
|
@ -399,7 +399,7 @@ SELECT pds.logicalrelid, pdsd.shardid, pdsd.shardstate
|
|||
ON pdsd.shardid = pds.shardid
|
||||
WHERE pds.logicalrelid = 'test_table_2'::regclass
|
||||
ORDER BY shardid, nodeport;
|
||||
logicalrelid | shardid | shardstate
|
||||
logicalrelid | shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
test_table_2 | 1710012 | 1
|
||||
test_table_2 | 1710012 | 1
|
||||
|
|
|
@ -1,121 +1,121 @@
|
|||
--
|
||||
-- Failure tests for COPY to reference tables
|
||||
--
|
||||
--
|
||||
-- Failure tests for COPY to reference tables
|
||||
--
|
||||
CREATE SCHEMA copy_reference_failure;
|
||||
SET search_path TO 'copy_reference_failure';
|
||||
SET citus.next_shard_id TO 130000;
|
||||
-- we don't want to see the prepared transaction numbers in the warnings
|
||||
SET client_min_messages TO ERROR;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE test_table(id int, value_1 int);
|
||||
SELECT create_reference_table('test_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE VIEW unhealthy_shard_count AS
|
||||
SELECT count(*)
|
||||
FROM pg_dist_shard_placement pdsp
|
||||
JOIN
|
||||
pg_dist_shard pds
|
||||
ON pdsp.shardid=pds.shardid
|
||||
CREATE VIEW unhealthy_shard_count AS
|
||||
SELECT count(*)
|
||||
FROM pg_dist_shard_placement pdsp
|
||||
JOIN
|
||||
pg_dist_shard pds
|
||||
ON pdsp.shardid=pds.shardid
|
||||
WHERE logicalrelid='copy_reference_failure.test_table'::regclass AND shardstate != 1;
|
||||
-- in the first test, kill just in the first
|
||||
-- in the first test, kill just in the first
|
||||
-- response we get from the worker
|
||||
SELECT citus.mitmproxy('conn.kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: failure on connection marked as essential: localhost:xxxxx
|
||||
CONTEXT: COPY test_table, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- kill as soon as the coordinator sends begin
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: failure on connection marked as essential: localhost:xxxxx
|
||||
CONTEXT: COPY test_table, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- cancel as soon as the coordinator sends begin
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: canceling statement due to user request
|
||||
CONTEXT: COPY test_table, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- kill as soon as the coordinator sends COPY command
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
|
@ -125,111 +125,111 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
COPY test_table, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- cancel as soon as the coordinator sends COPY command
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: canceling statement due to user request
|
||||
CONTEXT: COPY test_table, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- kill as soon as the worker sends CopyComplete
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- cancel as soon as the coordinator sends CopyData
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- kill the connection when we try to start the COPY
|
||||
-- kill the connection when we try to start the COPY
|
||||
-- the query should abort
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
|
@ -239,74 +239,74 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
COPY test_table, line 1: "1,2"
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- killing on PREPARE should be fine, everything should be rollbacked
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- cancelling on PREPARE should be fine, everything should be rollbacked
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -314,33 +314,33 @@ SELECT count(*) FROM test_table;
|
|||
-- killing on command complete of COMMIT PREPARE, we should see that the command succeeds
|
||||
-- and all the workers committed
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT PREPARED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- we shouldn't have any prepared transactions in the workers
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
@ -348,36 +348,36 @@ SELECT count(*) FROM test_table;
|
|||
TRUNCATE test_table;
|
||||
-- kill as soon as the coordinator sends COMMIT
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy test_table FROM STDIN DELIMITER ','
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Since we kill connections to one worker after commit arrives but the
|
||||
-- Since we kill connections to one worker after commit arrives but the
|
||||
-- other worker connections are healthy, we cannot commit on 1 worker
|
||||
-- which has 1 active shard placements, but the other does. That's why
|
||||
-- we expect to see 1 recovered prepared transactions.
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
@ -386,9 +386,9 @@ TRUNCATE test_table;
|
|||
-- finally, test failing on ROLLBACK just after the coordinator
|
||||
-- sends the ROLLBACK so the command can be rollbacked
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -398,30 +398,30 @@ ROLLBACK;
|
|||
WARNING: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- but now kill just after the worker sends response to
|
||||
-- but now kill just after the worker sends response to
|
||||
-- ROLLBACK command, command should have been rollbacked
|
||||
-- both on the distributed table and the placements
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="^ROLLBACK").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -431,25 +431,25 @@ ROLLBACK;
|
|||
WARNING: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM unhealthy_shard_count;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM test_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
|
File diff suppressed because it is too large
|
@ -3,9 +3,9 @@
|
|||
-- test create index concurrently command
|
||||
-- failure.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_count = 4; -- two per worker
|
||||
|
@ -13,16 +13,16 @@ CREATE SCHEMA index_schema;
|
|||
SET SEARCH_PATH=index_schema;
|
||||
CREATE TABLE index_test(id int, value_1 int, value_2 int);
|
||||
SELECT create_distributed_table('index_test', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- kill the connection when create command is issued
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
|
||||
|
@ -30,15 +30,15 @@ ERROR: CONCURRENTLY-enabled index command failed
|
|||
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
|
||||
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- verify index is not created
|
||||
SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$)
|
||||
WHERE nodeport = :worker_2_proxy_port;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 9060 | t | 0
|
||||
(1 row)
|
||||
|
@ -46,16 +46,16 @@ WHERE nodeport = :worker_2_proxy_port;
|
|||
DROP TABLE index_test;
|
||||
CREATE TABLE index_test(id int, value_1 int, value_2 int);
|
||||
SELECT create_reference_table('index_test');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- kill the connection when create command is issued
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
|
||||
|
@ -63,26 +63,26 @@ ERROR: CONCURRENTLY-enabled index command failed
|
|||
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
|
||||
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE index_test;
|
||||
CREATE TABLE index_test(id int, value_1 int, value_2 int);
|
||||
SELECT create_distributed_table('index_test', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- cancel the connection when create command is issued
|
||||
-- network traffic may differ between execution during cancellation
|
||||
-- therefore dump_network_traffic() calls are not made
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
|
||||
|
@ -90,24 +90,24 @@ ERROR: CONCURRENTLY-enabled index command failed
|
|||
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
|
||||
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE index_test;
|
||||
CREATE TABLE index_test(id int, value_1 int, value_2 int);
|
||||
SELECT create_reference_table('index_test');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- cancel the connection when create command is issued
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
|
||||
|
@ -115,25 +115,25 @@ ERROR: CONCURRENTLY-enabled index command failed
|
|||
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
|
||||
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE index_test;
|
||||
CREATE TABLE index_test(id int, value_1 int, value_2 int);
|
||||
SELECT create_distributed_table('index_test', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1);
|
||||
-- kill the connection when create command is issued
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP INDEX CONCURRENTLY IF EXISTS idx_index_test;
|
||||
|
@ -141,15 +141,15 @@ ERROR: CONCURRENTLY-enabled index command failed
|
|||
DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index.
|
||||
HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- verify index is not dropped at worker 2
|
||||
SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$)
|
||||
WHERE nodeport = :worker_2_proxy_port;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 9060 | t | 4
|
||||
(1 row)
|
||||
|
@ -160,7 +160,7 @@ NOTICE: drop cascades to table index_schema.index_test
|
|||
-- verify index is not at worker 2 upon cleanup
|
||||
SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE indexname LIKE 'idx_index_test%' $$)
|
||||
WHERE nodeport = :worker_2_proxy_port;
|
||||
nodename | nodeport | success | result
|
||||
nodename | nodeport | success | result
|
||||
---------------------------------------------------------------------
|
||||
localhost | 9060 | t | 0
|
||||
(1 row)
|
||||
|
|
|
@ -5,9 +5,9 @@ CREATE SCHEMA failure_reference_table;
|
|||
SET search_path TO 'failure_reference_table';
|
||||
SET citus.next_shard_id TO 10000000;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- this is merely used to get the schema creation propagated. Without there are failures
|
||||
|
@ -19,9 +19,9 @@ INSERT INTO ref_table VALUES(1),(2),(3);
|
|||
-- Kill on sending first query to worker node, should error
|
||||
-- out and not create any placement
|
||||
SELECT citus.mitmproxy('conn.onQuery().kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
|
@ -30,16 +30,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Kill after creating transaction on worker node
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
|
@ -48,31 +48,31 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Cancel after creating transaction on worker node
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Kill after copying data to worker node
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
|
@ -81,54 +81,54 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Cancel after copying data to worker node
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Kill after copying data to worker node
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
NOTICE: Copying data from local table...
|
||||
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- Cancel after copying data to worker node
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
NOTICE: Copying data from local table...
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -138,41 +138,41 @@ SET client_min_messages TO ERROR;
|
|||
-- Kill after preparing transaction. Since we don't commit after preparing, we recover
|
||||
-- prepared transaction afterwards.
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
ERROR: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT count(*) FROM pg_dist_shard_placement;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- Kill after commiting prepared, this should succeed
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT PREPARED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('ref_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
|
||||
shardid | nodeport | shardstate
|
||||
shardid | nodeport | shardstate
|
||||
---------------------------------------------------------------------
|
||||
10000008 | 9060 | 1
|
||||
10000008 | 57637 | 1
|
||||
|
@ -180,9 +180,9 @@ SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shard

SET client_min_messages TO NOTICE;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

DROP TABLE ref_table;

@ -192,9 +192,9 @@ CREATE TABLE ref_table(id int);
INSERT INTO ref_table VALUES(1),(2),(3);
-- Test in transaction
SELECT citus.mitmproxy('conn.onQuery().kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

BEGIN;

@ -207,46 +207,46 @@ ERROR: failure on connection marked as essential: localhost:xxxxx
|
|||
COMMIT;
|
||||
-- kill on ROLLBACK, should be rolled back
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
SELECT create_reference_table('ref_table');
|
||||
NOTICE: Copying data from local table...
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ROLLBACK;
|
||||
WARNING: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
-- cancel when the coordinator sends ROLLBACK, should be rolled back. We ignore cancellations
|
||||
-- during the ROLLBACK.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
SELECT create_reference_table('ref_table');
|
||||
NOTICE: Copying data from local table...
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ROLLBACK;
|
||||
SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
shardid | shardstate | shardlength | nodename | nodeport | placementid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
|
|
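The expected outputs above all exercise the same failure-injection cycle from the mitmproxy test harness: install a traffic rule on the proxied worker connection, run the statement under test, check that no shard metadata leaked, then reset the proxy. A minimal sketch of that cycle follows; it reuses only statements that already appear in these files, and the particular rule and table are illustrative rather than taken from any one test:

-- illustrative sketch of the kill / verify / reset cycle used throughout these tests
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');   -- break COPY traffic to the proxied worker
SELECT create_reference_table('ref_table');                     -- statement under test; expected to error out
SELECT count(*) FROM pg_dist_shard_placement;                    -- expected to stay at 0: no half-created placements
SELECT citus.mitmproxy('conn.allow()');                          -- restore normal traffic before the next test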
@ -4,19 +4,19 @@
CREATE SCHEMA failure_create_table;
SET search_path TO 'failure_create_table';
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SET citus.shard_replication_factor TO 1;
SET citus.shard_count to 4;
CREATE TABLE test_table(id int, value_1 int);
-- Kill connection before sending query to the worker
-- Kill connection before sending query to the worker
SELECT citus.mitmproxy('conn.kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test_table','id');

@ -25,19 +25,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -48,9 +48,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
-- be created only on the node which is not behind the proxy.
|
||||
-- https://github.com/citusdata/citus/pull/1652
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table', 'id');
|
||||
|
@ -59,19 +59,19 @@ ERROR: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'failure_create_table'$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,1)
|
||||
|
@ -83,9 +83,9 @@ CREATE TYPE schema_proc AS (a int);
|
|||
DROP TYPE schema_proc;
|
||||
-- Now, kill the connection while opening a transaction on the workers.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table','id');
|
||||
|
@ -94,19 +94,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -114,9 +114,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
|
||||
-- Now, kill the connection after sending create table command with worker_apply_shard_ddl_command UDF
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table','id');
|
||||
|
@ -125,19 +125,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -148,9 +148,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
BEGIN;
|
||||
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_command").after(1).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table', 'id');
|
||||
|
@ -160,19 +160,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
COMMIT;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -182,27 +182,27 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
-- workers. Note that, cancel requests will be ignored during
|
||||
-- shard creation.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table','id');
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -213,15 +213,15 @@ CREATE TABLE test_table(id int, value_1 int);
|
|||
-- Kill and cancel the connection with colocate_with option while sending the create table command
|
||||
CREATE TABLE temp_table(id int, value_1 int);
|
||||
SELECT create_distributed_table('temp_table','id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table');
|
||||
|
@ -230,46 +230,46 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
(2 rows)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table');
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -277,55 +277,55 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
|
||||
-- Kill and cancel the connection after worker sends "PREPARE TRANSACTION" ack with colocate_with option
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table');
|
||||
ERROR: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
(2 rows)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table');
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -338,11 +338,11 @@ DROP SCHEMA failure_create_table;
|
|||
CREATE SCHEMA failure_create_table;
|
||||
CREATE TABLE test_table(id int, value_1 int);
|
||||
-- Test inside transaction
|
||||
-- Kill connection before sending query to the worker
|
||||
-- Kill connection before sending query to the worker
|
||||
SELECT citus.mitmproxy('conn.kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -353,19 +353,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
ROLLBACK;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -377,9 +377,9 @@ CREATE TYPE schema_proc AS (a int);
|
|||
DROP TYPE schema_proc;
|
||||
-- Now, kill the connection while opening a transaction on the workers, inside a transaction block.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -390,19 +390,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
ROLLBACK;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -413,9 +413,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
-- shard creation again in transaction if we're not relying on the
|
||||
-- executor. So, we'll have two output files
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -423,25 +423,25 @@ SELECT create_distributed_table('test_table','id');
|
|||
ERROR: canceling statement due to user request
|
||||
COMMIT;
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -456,9 +456,9 @@ CREATE TABLE test_table(id int, value_1 int);
|
|||
SET citus.multi_shard_commit_protocol TO "1pc";
|
||||
-- Kill connection before sending query to the worker with 1pc.
|
||||
SELECT citus.mitmproxy('conn.kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -469,19 +469,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
ROLLBACK;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -489,9 +489,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
|
||||
-- Kill connection while sending create table command with 1pc.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -502,19 +502,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
ROLLBACK;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -526,9 +526,9 @@ CREATE TYPE schema_proc AS (a int);
|
|||
DROP TYPE schema_proc;
|
||||
-- Now, kill the connection while opening transactions on workers with 1pc. Transaction will be opened due to BEGIN.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -539,19 +539,19 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
ROLLBACK;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -561,9 +561,9 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
-- workers with 1pc. Note that, cancel requests will be ignored during
|
||||
-- shard creation unless the executor is used. So, we'll have two output files
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -571,25 +571,25 @@ SELECT create_distributed_table('test_table','id');
|
|||
ERROR: canceling statement due to user request
|
||||
COMMIT;
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -602,16 +602,16 @@ CREATE SCHEMA failure_create_table;
|
|||
SET citus.multi_shard_commit_protocol TO "2pc";
|
||||
CREATE TABLE test_table_2(id int, value_1 int);
|
||||
SELECT master_create_distributed_table('test_table_2', 'id', 'hash');
|
||||
master_create_distributed_table
|
||||
master_create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Kill connection before sending query to the worker
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_create_worker_shards('test_table_2', 4, 2);
|
||||
|
@ -620,25 +620,25 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -646,28 +646,28 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
|
||||
-- Kill the connection after worker sends "PREPARE TRANSACTION" ack
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_create_worker_shards('test_table_2', 4, 2);
|
||||
ERROR: connection not open
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
@ -675,34 +675,34 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
|
|||
|
||||
-- Cancel the connection after sending prepare transaction in master_create_worker_shards
|
||||
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT master_create_worker_shards('test_table_2', 4, 2);
|
||||
ERROR: canceling statement due to user request
|
||||
-- Show that there is no pending transaction
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
|
||||
run_command_on_workers
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,9060,t,0)
|
||||
(localhost,57637,t,0)
|
||||
|
|
|
@ -7,23 +7,23 @@ SELECT pg_backend_pid() as pid \gset
CREATE TABLE users_table (user_id int, user_name text);
CREATE TABLE events_table(user_id int, event_id int, event_type int);
SELECT create_distributed_table('users_table', 'user_id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('events_table', 'user_id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE users_table_local AS SELECT * FROM users_table;
-- kill at the first copy (push)
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

WITH cte AS (

@ -35,19 +35,19 @@ WITH cte AS (
|
|||
)
|
||||
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
|
||||
)
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
cte,
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
event_type IN (1,2,3,4)
|
||||
ORDER BY 1 DESC LIMIT 5
|
||||
) as foo
|
||||
) as foo
|
||||
WHERE foo.user_id = cte.user_id;
|
||||
ERROR: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
|
@ -55,9 +55,9 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- kill at the second copy (pull)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte AS (
|
||||
|
@ -69,19 +69,19 @@ WITH cte AS (
|
|||
)
|
||||
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
|
||||
)
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
cte,
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
event_type IN (1,2,3,4)
|
||||
ORDER BY 1 DESC LIMIT 5
|
||||
) as foo
|
||||
) as foo
|
||||
WHERE foo.user_id = cte.user_id;
|
||||
ERROR: connection error: localhost:xxxxx
|
||||
DETAIL: server closed the connection unexpectedly
|
||||
|
@ -89,9 +89,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- kill at the third copy (pull)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte AS (
|
||||
|
@ -103,19 +103,19 @@ WITH cte AS (
|
|||
)
|
||||
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
|
||||
)
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
cte,
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
event_type IN (1,2,3,4)
|
||||
ORDER BY 1 DESC LIMIT 5
|
||||
) as foo
|
||||
) as foo
|
||||
WHERE foo.user_id = cte.user_id;
|
||||
ERROR: connection error: localhost:xxxxx
|
||||
DETAIL: server closed the connection unexpectedly
|
||||
|
@ -123,9 +123,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- cancel at the first copy (push)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte AS (
|
||||
|
@ -137,26 +137,26 @@ WITH cte AS (
|
|||
)
|
||||
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
|
||||
)
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
cte,
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
event_type IN (1,2,3,4)
|
||||
ORDER BY 1 DESC LIMIT 5
|
||||
) as foo
|
||||
) as foo
|
||||
WHERE foo.user_id = cte.user_id;
|
||||
ERROR: canceling statement due to user request
|
||||
-- cancel at the second copy (pull)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte AS (
|
||||
|
@ -168,26 +168,26 @@ WITH cte AS (
|
|||
)
|
||||
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
|
||||
)
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
cte,
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
event_type IN (1,2,3,4)
|
||||
ORDER BY 1 DESC LIMIT 5
|
||||
) as foo
|
||||
) as foo
|
||||
WHERE foo.user_id = cte.user_id;
|
||||
ERROR: canceling statement due to user request
|
||||
-- cancel at the third copy (pull)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte AS (
|
||||
|
@ -199,33 +199,33 @@ WITH cte AS (
|
|||
)
|
||||
SELECT dist_cte.user_id FROM local_cte join dist_cte on dist_cte.user_id=local_cte.user_id
|
||||
)
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
cte,
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
(SELECT
|
||||
DISTINCT users_table.user_id
|
||||
FROM
|
||||
users_table, events_table
|
||||
WHERE
|
||||
users_table.user_id = events_table.user_id AND
|
||||
event_type IN (1,2,3,4)
|
||||
ORDER BY 1 DESC LIMIT 5
|
||||
) as foo
|
||||
) as foo
|
||||
WHERE foo.user_id = cte.user_id;
|
||||
ERROR: canceling statement due to user request
|
||||
-- distributed update tests
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- insert some rows
|
||||
INSERT INTO users_table VALUES (1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E');
|
||||
INSERT INTO events_table VALUES (1,1,1), (1,2,1), (1,3,1), (2,1, 4), (3, 4,1), (5, 1, 2), (5, 2, 1), (5, 2,2);
|
||||
SELECT * FROM users_table ORDER BY 1, 2;
|
||||
user_id | user_name
|
||||
user_id | user_name
|
||||
---------------------------------------------------------------------
|
||||
1 | A
|
||||
2 | B
|
||||
|
@ -239,7 +239,7 @@ WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURN
|
|||
INSERT INTO users_table SELECT * FROM cte_delete;
|
||||
-- verify contents are the same
|
||||
SELECT * FROM users_table ORDER BY 1, 2;
|
||||
user_id | user_name
|
||||
user_id | user_name
|
||||
---------------------------------------------------------------------
|
||||
1 | A
|
||||
2 | B
|
||||
|
@ -250,9 +250,9 @@ SELECT * FROM users_table ORDER BY 1, 2;
|
|||
|
||||
-- kill connection during deletion
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
|
||||
|
@ -263,13 +263,13 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify contents are the same
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM users_table ORDER BY 1, 2;
|
||||
user_id | user_name
|
||||
user_id | user_name
|
||||
---------------------------------------------------------------------
|
||||
1 | A
|
||||
2 | B
|
||||
|
@ -280,9 +280,9 @@ SELECT * FROM users_table ORDER BY 1, 2;
|
|||
|
||||
-- kill connection during insert
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
|
||||
|
@ -293,13 +293,13 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- verify contents are the same
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM users_table ORDER BY 1, 2;
|
||||
user_id | user_name
|
||||
user_id | user_name
|
||||
---------------------------------------------------------------------
|
||||
1 | A
|
||||
2 | B
|
||||
|
@ -310,9 +310,9 @@ SELECT * FROM users_table ORDER BY 1, 2;
|
|||
|
||||
-- cancel during deletion
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
|
||||
|
@ -320,13 +320,13 @@ INSERT INTO users_table SELECT * FROM cte_delete;
|
|||
ERROR: canceling statement due to user request
|
||||
-- verify contents are the same
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM users_table ORDER BY 1, 2;
|
||||
user_id | user_name
|
||||
user_id | user_name
|
||||
---------------------------------------------------------------------
|
||||
1 | A
|
||||
2 | B
|
||||
|
@ -337,9 +337,9 @@ SELECT * FROM users_table ORDER BY 1, 2;
|
|||
|
||||
-- cancel during insert
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
|
||||
|
@ -347,13 +347,13 @@ INSERT INTO users_table SELECT * FROM cte_delete;
|
|||
ERROR: canceling statement due to user request
|
||||
-- verify contents are the same
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM users_table ORDER BY 1, 2;
|
||||
user_id | user_name
|
||||
user_id | user_name
|
||||
---------------------------------------------------------------------
|
||||
1 | A
|
||||
2 | B
|
||||
|
@ -364,9 +364,9 @@ SELECT * FROM users_table ORDER BY 1, 2;
|
|||
|
||||
-- test sequential delete/insert
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -380,9 +380,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
END;
|
||||
RESET SEARCH_PATH;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP SCHEMA cte_failure CASCADE;
|
||||
|
|
File diff suppressed because it is too large
|
@ -4,9 +4,9 @@
-- performs failure/cancellation test for insert/select pushed down to shards.
--
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

CREATE SCHEMA insert_select_pushdown;

@ -17,20 +17,20 @@ SELECT pg_backend_pid() as pid \gset
|
|||
CREATE TABLE events_table(user_id int, event_id int, event_type int);
|
||||
CREATE TABLE events_summary(user_id int, event_id int, event_count int);
|
||||
SELECT create_distributed_table('events_table', 'user_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('events_summary', 'user_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2);
|
||||
SELECT count(*) FROM events_summary;
|
||||
count
|
||||
SELECT count(*) FROM events_summary;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -38,9 +38,9 @@ SELECT count(*) FROM events_summary;
|
|||
-- insert/select from one distributed table to another
|
||||
-- kill worker query
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2;
|
||||
|
@ -50,51 +50,51 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
--verify nothing is modified
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM events_summary;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- cancel worker query
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2;
|
||||
ERROR: canceling statement due to user request
|
||||
--verify nothing is modified
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM events_summary;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- test self insert/select
|
||||
SELECT count(*) FROM events_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
-- kill worker query
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_table SELECT * FROM events_table;
|
||||
|
@ -104,44 +104,44 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
--verify nothing is modified
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM events_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
-- cancel worker query
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_table SELECT * FROM events_table;
|
||||
ERROR: canceling statement due to user request
|
||||
--verify nothing is modified
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM events_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
8
|
||||
(1 row)
|
||||
|
||||
RESET SEARCH_PATH;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP SCHEMA insert_select_pushdown CASCADE;
|
||||
|
|
|
@ -14,32 +14,32 @@ CREATE TABLE events_summary(event_id int, event_type int, event_count int);
|
|||
CREATE TABLE events_reference(event_type int, event_count int);
|
||||
CREATE TABLE events_reference_distributed(event_type int, event_count int);
|
||||
SELECT create_distributed_table('events_table', 'user_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('events_summary', 'event_id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('events_reference');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('events_reference_distributed', 'event_type');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_table VALUES (1, 1, 3 ), (1, 2, 1), (1, 3, 2), (2, 4, 3), (3, 5, 1), (4, 7, 1), (4, 1, 9), (4, 3, 2);
|
||||
SELECT count(*) FROM events_summary;
|
||||
count
|
||||
SELECT count(*) FROM events_summary;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -47,9 +47,9 @@ SELECT count(*) FROM events_summary;
|
|||
-- insert/select from one distributed table to another
|
||||
-- kill coordinator pull query
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
|
||||
|
@ -59,9 +59,9 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- kill data push
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
|
||||
|
@ -71,31 +71,31 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- cancel coordinator pull query
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
|
||||
ERROR: canceling statement due to user request
|
||||
-- cancel data push
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_table GROUP BY 1,2;
|
||||
ERROR: canceling statement due to user request
|
||||
--verify nothing is modified
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM events_summary;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
|@ -103,9 +103,9 @@ SELECT count(*) FROM events_summary;
|||
-- insert into reference table from a distributed table
-- kill coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
|@ -115,9 +115,9 @@ ERROR: server closed the connection unexpectedly
|||
CONTEXT: while executing command on localhost:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
|@ -127,31 +127,31 @@ ERROR: server closed the connection unexpectedly
|||
CONTEXT: while executing command on localhost:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
ERROR: canceling statement due to user request
-- cancel data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
ERROR: canceling statement due to user request
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SELECT count(*) FROM events_reference;
count
count
---------------------------------------------------------------------
0
(1 row)
|@ -161,9 +161,9 @@ SELECT count(*) FROM events_reference;
|||
INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP BY 1;
-- kill coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
|@ -173,9 +173,9 @@ ERROR: server closed the connection unexpectedly
|||
CONTEXT: while executing command on localhost:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
|@ -185,40 +185,40 @@ ERROR: server closed the connection unexpectedly
|||
CONTEXT: while executing command on localhost:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
ERROR: canceling statement due to user request
-- cancel data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").cancel(' || :pid || ')');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO events_reference_distributed SELECT * FROM events_reference;
ERROR: canceling statement due to user request
--verify nothing is modified
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SELECT count(*) FROM events_reference_distributed;
count
count
---------------------------------------------------------------------
0
(1 row)

RESET SEARCH_PATH;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

DROP SCHEMA coordinator_insert_select CASCADE;
|
|
|@ -1,7 +1,7 @@
|||
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SET citus.shard_count = 2;
|@ -10,25 +10,25 @@ SET citus.next_shard_id TO 103400;
|||
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100;
CREATE TABLE dml_test (id integer, name text);
SELECT create_distributed_table('dml_test', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

COPY dml_test FROM STDIN WITH CSV;
SELECT citus.clear_network_traffic();
clear_network_traffic
clear_network_traffic
---------------------------------------------------------------------


(1 row)

---- test multiple statements spanning multiple shards,
---- at each significant point. These transactions are 2pc
-- fail at DELETE
SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

BEGIN;
|
||||
|
@ -48,7 +48,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
|
|||
COMMIT;
|
||||
--- shouldn't see any changes performed in failed transaction
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -58,9 +58,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- cancel at DELETE
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -77,7 +77,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
|
|||
COMMIT;
|
||||
--- shouldn't see any changes performed in failed transaction
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -87,9 +87,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- fail at INSERT
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -107,7 +107,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
|
|||
COMMIT;
|
||||
--- shouldn't see any changes before failed INSERT
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -117,9 +117,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- cancel at INSERT
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -134,7 +134,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
|
|||
COMMIT;
|
||||
--- shouldn't see any changes before failed INSERT
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -144,9 +144,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- fail at UPDATE
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -163,7 +163,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
|
|||
COMMIT;
|
||||
--- shouldn't see any changes after failed UPDATE
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -173,9 +173,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- cancel at UPDATE
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -189,7 +189,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
|
|||
COMMIT;
|
||||
--- shouldn't see any changes after failed UPDATE
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -199,9 +199,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- fail at PREPARE TRANSACTION
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- this transaction block will be sent to the coordinator as a remote command to hide the
|
||||
|
@ -221,31 +221,31 @@ COMMIT;
|
|||
'],
|
||||
false
|
||||
);
|
||||
master_run_on_worker
|
||||
master_run_on_worker
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57636,t,BEGIN)
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
|
||||
shardid
|
||||
shardid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- shouldn't see any changes after failed PREPARE
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -255,9 +255,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- cancel at PREPARE TRANSACTION
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- we'll test for the txn side-effects to ensure it didn't run
|
||||
|
@ -270,25 +270,25 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3;
|
|||
COMMIT;
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
|
||||
shardid
|
||||
shardid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
-- shouldn't see any changes after failed PREPARE
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -298,9 +298,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- fail at COMMIT
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- hide the error message (it has the PID)...
|
||||
|
@ -315,25 +315,25 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3;
|
|||
COMMIT;
|
||||
SET client_min_messages TO DEFAULT;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
|
||||
shardid
|
||||
shardid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- should see changes, because of txn recovery
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
3 | gamma
|
||||
4 | Delta
|
||||
|
@ -342,9 +342,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- cancel at COMMITs are ignored by Postgres
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -356,7 +356,7 @@ UPDATE dml_test SET name = 'gamma' WHERE id = 3;
|
|||
COMMIT;
|
||||
-- should see changes, because cancellation is ignored
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
3 | gamma
|
||||
4 | Delta
|
||||
|
@ -370,18 +370,18 @@ SET citus.shard_count = 1;
|
|||
SET citus.shard_replication_factor = 2; -- two placements
|
||||
CREATE TABLE dml_test (id integer, name text);
|
||||
SELECT create_distributed_table('dml_test', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY dml_test FROM STDIN WITH CSV;
|
||||
---- test multiple statements against a single shard, but with two placements
|
||||
-- fail at COMMIT (actually COMMIT this time, as no 2pc in use)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -398,7 +398,7 @@ WARNING: connection not open
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
--- should see all changes, but they only went to one placement (other is unhealthy)
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
3 | gamma
|
||||
4 | Delta
|
||||
|
@ -406,15 +406,15 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
(3 rows)
|
||||
|
||||
SELECT shardid FROM pg_dist_shard_placement WHERE shardstate = 3;
|
||||
shardid
|
||||
shardid
|
||||
---------------------------------------------------------------------
|
||||
103402
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- drop table and recreate as reference table
|
||||
|
@ -423,17 +423,17 @@ SET citus.shard_count = 2;
|
|||
SET citus.shard_replication_factor = 1;
|
||||
CREATE TABLE dml_test (id integer, name text);
|
||||
SELECT create_reference_table('dml_test');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
COPY dml_test FROM STDIN WITH CSV;
|
||||
-- fail at COMMIT (by failing to PREPARE)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -447,7 +447,7 @@ ERROR: connection not open
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
--- shouldn't see any changes after failed COMMIT
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -457,9 +457,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- cancel at COMMIT (by cancelling on PREPARE)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -472,7 +472,7 @@ COMMIT;
|
|||
ERROR: canceling statement due to user request
|
||||
--- shouldn't see any changes after cancelled PREPARE
|
||||
SELECT * FROM dml_test ORDER BY id ASC;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
1 | Alpha
|
||||
2 | Beta
|
||||
|
@ -482,9 +482,9 @@ SELECT * FROM dml_test ORDER BY id ASC;
|
|||
|
||||
-- allow connection to allow DROP
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE dml_test;
|
|
|@ -10,23 +10,23 @@ SET citus.next_shard_id TO 301000;
|||
SET citus.shard_replication_factor TO 1;
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

CREATE TABLE distributed_table(key int, value int);
|
||||
CREATE TABLE reference_table(value int);
|
||||
SELECT create_distributed_table('distributed_table', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('reference_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- we'll test failure cases of the following cases:
|
||||
|
@ -37,9 +37,9 @@ SELECT create_reference_table('reference_table');
|
|||
-- (e) multi-row INSERT to a reference table
|
||||
-- Failure and cancellation on multi-row INSERT that hits the same shard with the same value
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3);
|
||||
|
@ -52,9 +52,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
-- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6);
|
||||
-- Failure and cancellation on multi-row INSERT that hits the same shard with different values
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,7), (5,8);
|
||||
|
@ -67,9 +67,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
-- INSERT INTO distributed_table VALUES (1,9), (5,10);
|
||||
-- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,11), (6,12);
|
||||
|
@ -78,18 +78,18 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,13), (6,14);
|
||||
ERROR: canceling statement due to user request
|
||||
-- Failure and cancellation multi-row INSERT that hits multiple shards in a single worker, happening on the second query
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,15), (6,16);
|
||||
|
@ -98,18 +98,18 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,17), (6,18);
|
||||
ERROR: canceling statement due to user request
|
||||
-- Failure and cancellation multi-row INSERT that hits multiple shards in multiple workers
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (2,19),(1,20);
|
||||
|
@ -118,54 +118,54 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (2,21), (1,22);
|
||||
ERROR: canceling statement due to user request
|
||||
-- one test for the reference tables for completeness
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO reference_table VALUES (1), (2), (3), (4);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,1), (2,2), (3,3), (4,2), (5,2), (6,2), (7,2);
|
||||
ERROR: canceling statement due to user request
|
||||
-- cancel the second insert over the same connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO distributed_table VALUES (1,1), (2,2), (3,3), (4,2), (5,2), (6,2), (7,2);
|
||||
ERROR: canceling statement due to user request
|
||||
-- we've either failed or cancelled all queries, so should be empty
|
||||
SELECT * FROM distributed_table;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT * FROM reference_table;
|
||||
value
|
||||
value
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET SEARCH_PATH;
|
|
|@ -9,30 +9,30 @@ SET citus.shard_replication_factor TO 1;
|||
-- do not cache any connections
SET citus.max_cached_conns_per_worker TO 0;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

CREATE TABLE t1(a int PRIMARY KEY, b int, c int);
|
||||
CREATE TABLE r1(a int, b int PRIMARY KEY);
|
||||
CREATE TABLE t2(a int REFERENCES t1(a) ON DELETE CASCADE, b int REFERENCES r1(b) ON DELETE CASCADE, c int);
|
||||
SELECT create_distributed_table('t1', 'a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_reference_table('r1');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('t2', 'a');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- insert some data
|
||||
|
@ -41,13 +41,13 @@ INSERT INTO t1 VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);
|
|||
INSERT INTO t2 VALUES (1, 1, 1), (1, 2, 1), (2, 1, 2), (2, 2, 4), (3, 1, 3), (3, 2, 3), (3, 3, 3);
|
||||
SELECT pg_backend_pid() as pid \gset
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
SHOW citus.multi_shard_commit_protocol ;
|
||||
citus.multi_shard_commit_protocol
|
||||
citus.multi_shard_commit_protocol
|
||||
---------------------------------------------------------------------
|
||||
2pc
|
||||
(1 row)
|
||||
|
@ -56,9 +56,9 @@ SHOW citus.multi_shard_commit_protocol ;
|
|||
-- delete using a filter on non-partition column filter
|
||||
-- test both kill and cancellation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard delete
|
||||
|
@ -69,16 +69,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- kill just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DELETE FROM t2 WHERE b = 2;
|
||||
|
@ -88,16 +88,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- cancellation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard delete
|
||||
|
@ -105,23 +105,23 @@ DELETE FROM t2 WHERE b = 2;
|
|||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- cancel just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DELETE FROM t2 WHERE b = 2;
|
||||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
@ -132,15 +132,15 @@ SELECT count(*) FROM t2;
|
|||
-- delete using a filter on non-partition column filter
|
||||
-- test both kill and cancellation
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard update
|
||||
|
@ -151,16 +151,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
-- kill just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE t2 SET c = 4 WHERE b = 2;
|
||||
|
@ -170,16 +170,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
-- cancellation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard update
|
||||
|
@ -187,23 +187,23 @@ UPDATE t2 SET c = 4 WHERE b = 2;
|
|||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
-- cancel just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE t2 SET c = 4 WHERE b = 2;
|
||||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
@ -214,9 +214,9 @@ SET citus.multi_shard_commit_protocol TO '1PC';
|
|||
-- delete using a filter on non-partition column filter
|
||||
-- test both kill and cancellation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard delete
|
||||
|
@ -227,16 +227,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- kill just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DELETE FROM t2 WHERE b = 2;
|
||||
|
@ -246,16 +246,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- cancellation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard delete
|
||||
|
@ -263,23 +263,23 @@ DELETE FROM t2 WHERE b = 2;
|
|||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
||||
-- cancel just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DELETE FROM t2 WHERE b = 2;
|
||||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FROM t2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
7
|
||||
(1 row)
|
||||
|
@ -290,15 +290,15 @@ SELECT count(*) FROM t2;
|
|||
-- delete using a filter on non-partition column filter
|
||||
-- test both kill and cancellation
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard update
|
||||
|
@ -309,16 +309,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
-- kill just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE t2 SET c = 4 WHERE b = 2;
|
||||
|
@ -328,16 +328,16 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
-- cancellation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- issue a multi shard update
|
||||
|
@ -345,23 +345,23 @@ UPDATE t2 SET c = 4 WHERE b = 2;
|
|||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
||||
-- cancel just one connection
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE t2 SET c = 4 WHERE b = 2;
|
||||
ERROR: canceling statement due to user request
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2;
|
||||
b2 | c4
|
||||
b2 | c4
|
||||
---------------------------------------------------------------------
|
||||
3 | 1
|
||||
(1 row)
|
||||
|
@ -377,22 +377,22 @@ RESET citus.multi_shard_commit_protocol;
|
|||
-- it is safe to remove them without reducing any
|
||||
-- test coverage
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- check counts before delete
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b2
|
||||
b2
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DELETE FROM r1 WHERE a = 2;
|
||||
|
@ -402,15 +402,15 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b2
|
||||
b2
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DELETE FROM t2 WHERE b = 2;
|
||||
|
@ -420,28 +420,28 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is deleted
|
||||
SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b2
|
||||
b2
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
-- test update with subquery pull
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE t3 AS SELECT * FROM t2;
|
||||
SELECT create_distributed_table('t3', 'a');
|
||||
NOTICE: Copying data from local table...
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM t3 ORDER BY 1, 2, 3;
|
||||
a | b | c
|
||||
a | b | c
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 | 1
|
||||
1 | 2 | 1
|
||||
|
@ -453,9 +453,9 @@ SELECT * FROM t3 ORDER BY 1, 2, 3;
|
|||
(7 rows)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE t3 SET c = q.c FROM (
|
||||
|
@ -468,13 +468,13 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
--- verify nothing is updated
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM t3 ORDER BY 1, 2, 3;
|
||||
a | b | c
|
||||
a | b | c
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 | 1
|
||||
1 | 2 | 1
|
||||
|
@ -487,9 +487,9 @@ SELECT * FROM t3 ORDER BY 1, 2, 3;
|
|||
|
||||
-- kill update part
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE multi_shard.t3_201009").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE t3 SET c = q.c FROM (
|
||||
|
@ -502,13 +502,13 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
--- verify nothing is updated
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM t3 ORDER BY 1, 2, 3;
|
||||
a | b | c
|
||||
a | b | c
|
||||
---------------------------------------------------------------------
|
||||
1 | 1 | 1
|
||||
1 | 2 | 1
|
||||
|
@ -524,31 +524,31 @@ SELECT * FROM t3 ORDER BY 1, 2, 3;
|
|||
-- use a different set of table
|
||||
SET citus.shard_replication_factor to 2;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE t3;
|
||||
CREATE TABLE t3 AS SELECT * FROM t2;
|
||||
SELECT create_distributed_table('t3', 'a');
|
||||
NOTICE: Copying data from local table...
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
||||
-- prevent update of one replica of one shard
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE t3 SET b = 2 WHERE b = 1;
|
||||
|
@ -558,7 +558,7 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -566,13 +566,13 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
|
|||
-- fail only one update verify transaction is rolled back correctly
|
||||
BEGIN;
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -580,7 +580,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
|
|||
UPDATE t2 SET b = 2 WHERE b = 1;
|
||||
-- verify update is performed on t2
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
0 | 6
|
||||
(1 row)
|
||||
|
@ -594,13 +594,13 @@ DETAIL: server closed the connection unexpectedly
|
|||
END;
|
||||
-- verify everything is rolled back
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -612,7 +612,7 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -620,7 +620,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
|
|||
-- switch to 1PC
|
||||
SET citus.multi_shard_commit_protocol TO '1PC';
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -632,7 +632,7 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- verify nothing is updated
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -640,13 +640,13 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
|
|||
-- fail only one update verify transaction is rolled back correctly
|
||||
BEGIN;
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -654,7 +654,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
|
|||
UPDATE t2 SET b = 2 WHERE b = 1;
|
||||
-- verify update is performed on t2
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
0 | 6
|
||||
(1 row)
|
||||
|
@ -668,21 +668,21 @@ DETAIL: server closed the connection unexpectedly
|
|||
END;
|
||||
-- verify everything is rolled back
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3;
|
||||
b1 | b2
|
||||
b1 | b2
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET SEARCH_PATH;
|
|
|@ -9,39 +9,39 @@ SET citus.shard_replication_factor TO 1;
|||
SET citus.replication_model TO 'streaming';
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

CREATE TABLE t1 (id int PRIMARY KEY);
|
||||
SELECT create_distributed_table('t1', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO t1 SELECT x FROM generate_series(1,100) AS f(x);
|
||||
-- Initial metadata status
|
||||
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
|
||||
hasmetadata
|
||||
hasmetadata
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
-- Failure to set groupid in the worker
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -51,17 +51,17 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- Failure to drop all tables in pg_dist_partition
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -71,17 +71,17 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- Failure to truncate pg_dist_node in the worker
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -91,17 +91,17 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- Failure to populate pg_dist_node in the worker
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
|
@ -111,26 +111,26 @@ ERROR: server closed the connection unexpectedly
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
-- Verify that coordinator knows worker does not have valid metadata
|
||||
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
|
||||
hasmetadata
|
||||
hasmetadata
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
||||
|
||||
-- Verify we can sync metadata after unsuccessful attempts
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
|
||||
start_metadata_sync_to_node
|
||||
start_metadata_sync_to_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
|
||||
hasmetadata
|
||||
hasmetadata
|
||||
---------------------------------------------------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
@ -138,9 +138,9 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
|
|||
-- Check failures on DDL command propagation
|
||||
CREATE TABLE t2 (id int PRIMARY KEY);
|
||||
SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_placement").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('t2', 'id');
|
||||
|
@ -149,9 +149,9 @@ ERROR: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
CONTEXT: while executing command on localhost:xxxxx
|
||||
SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_shard").cancel(' || :pid || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT create_distributed_table('t2', 'id');
|
||||
|
@ -160,7 +160,7 @@ ERROR: canceling statement due to user request
|
|||
SELECT count(*) > 0 AS is_table_distributed
|
||||
FROM pg_dist_partition
|
||||
WHERE logicalrelid='t2'::regclass;
|
||||
is_table_distributed
|
||||
is_table_distributed
|
||||
---------------------------------------------------------------------
|
||||
f
|
||||
(1 row)
|
|
|@ -1,35 +1,35 @@
|||
SET citus.next_shard_id TO 100500;
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

CREATE TABLE ref_table (key int, value int);
|
||||
SELECT create_reference_table('ref_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
\copy ref_table FROM stdin delimiter ',';
|
||||
SELECT citus.clear_network_traffic();
|
||||
clear_network_traffic
|
||||
clear_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT COUNT(*) FROM ref_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
-- verify behavior of single INSERT; should fail to execute
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO ref_table VALUES (5, 6);
|
||||
|
@ -38,16 +38,16 @@ DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT COUNT(*) FROM ref_table WHERE key=5;
count
count
---------------------------------------------------------------------
0
(1 row)

-- verify behavior of UPDATE ... RETURNING; should not execute
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

UPDATE ref_table SET key=7 RETURNING value;
@ -56,16 +56,16 @@ DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
SELECT COUNT(*) FROM ref_table WHERE key=7;
count
count
---------------------------------------------------------------------
0
(1 row)

-- verify fix to #2214; should raise error and fail to execute
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

BEGIN;
@ -77,23 +77,23 @@ DETAIL: server closed the connection unexpectedly
before or while processing the request.
COMMIT;
SELECT COUNT(*) FROM ref_table WHERE key=value;
count
count
---------------------------------------------------------------------
0
(1 row)

-- all shards should still be healthy
SELECT COUNT(*) FROM pg_dist_shard_placement WHERE shardstate = 3;
count
count
---------------------------------------------------------------------
0
(1 row)

-- ==== Clean up, we're done here ====
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

DROP TABLE ref_table;

@ -4,9 +4,9 @@
|
|||
-- the placement commands fail. Otherwise, we might mark the placement
|
||||
-- as invalid and continue with a WARNING.
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_count = 2;
|
||||
|
@ -18,9 +18,9 @@ CREATE TABLE artists (
|
|||
name text NOT NULL
|
||||
);
|
||||
SELECT create_distributed_table('artists', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- add some data
|
||||
|
@ -30,9 +30,9 @@ INSERT INTO artists VALUES (3, 'Claude Monet');
|
|||
INSERT INTO artists VALUES (4, 'William Kurelek');
|
||||
-- simply fail at SAVEPOINT
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -54,16 +54,16 @@ RELEASE SAVEPOINT s1;
|
|||
ERROR: current transaction is aborted, commands ignored until end of transaction block
|
||||
COMMIT;
|
||||
SELECT * FROM artists WHERE id IN (4, 5);
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
4 | William Kurelek
|
||||
(1 row)
|
||||
|
||||
-- fail at RELEASE
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -86,16 +86,16 @@ ERROR: connection not open
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
ROLLBACK;
|
||||
SELECT * FROM artists WHERE id IN (4, 5);
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
4 | William Kurelek
|
||||
(1 row)
|
||||
|
||||
-- fail at ROLLBACK
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -110,16 +110,16 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
COMMIT;
|
||||
ERROR: could not make changes to shard xxxxx on any node
|
||||
SELECT * FROM artists WHERE id IN (4, 5);
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
4 | William Kurelek
|
||||
(1 row)
|
||||
|
||||
-- fail at second RELEASE
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -142,16 +142,16 @@ ERROR: connection not open
|
|||
CONTEXT: while executing command on localhost:xxxxx
|
||||
COMMIT;
|
||||
SELECT * FROM artists WHERE id IN (4, 5);
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
4 | William Kurelek
|
||||
(1 row)
|
||||
|
||||
-- fail at second ROLLBACK
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").after(1).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -168,15 +168,15 @@ CONTEXT: while executing command on localhost:xxxxx
|
|||
COMMIT;
|
||||
ERROR: could not make changes to shard xxxxx on any node
|
||||
SELECT * FROM artists WHERE id IN (4, 5);
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
4 | William Kurelek
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").after(1).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Release after rollback
|
||||
|
@ -191,14 +191,14 @@ ROLLBACK TO s2;
|
|||
RELEASE SAVEPOINT s2;
|
||||
COMMIT;
|
||||
SELECT * FROM artists WHERE id=7;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Recover from errors
|
||||
|
@ -219,7 +219,7 @@ WARNING: connection not open
|
|||
COMMIT;
|
||||
ERROR: could not make changes to shard xxxxx on any node
|
||||
SELECT * FROM artists WHERE id=6;
|
||||
id | name
|
||||
id | name
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -232,16 +232,16 @@ CREATE TABLE researchers (
|
|||
SET citus.shard_count = 1;
|
||||
SET citus.shard_replication_factor = 2; -- single shard, on both workers
|
||||
SELECT create_distributed_table('researchers', 'lab_id', 'hash');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- simply fail at SAVEPOINT
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SAVEPOINT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -261,7 +261,7 @@ ERROR: current transaction is aborted, commands ignored until end of transactio
|
|||
COMMIT;
|
||||
-- should see correct results from healthy placement and one bad placement
|
||||
SELECT * FROM researchers WHERE lab_id = 4;
|
||||
id | lab_id | name
|
||||
id | lab_id | name
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -269,16 +269,16 @@ UPDATE pg_dist_shard_placement SET shardstate = 1
|
|||
WHERE shardstate = 3 AND shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass
|
||||
) RETURNING placementid;
|
||||
placementid
|
||||
placementid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
TRUNCATE researchers;
|
||||
-- fail at rollback
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -293,7 +293,7 @@ COMMIT;
|
|||
ERROR: failure on connection marked as essential: localhost:xxxxx
|
||||
-- should see correct results from healthy placement and one bad placement
|
||||
SELECT * FROM researchers WHERE lab_id = 4;
|
||||
id | lab_id | name
|
||||
id | lab_id | name
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -301,16 +301,16 @@ UPDATE pg_dist_shard_placement SET shardstate = 1
|
|||
WHERE shardstate = 3 AND shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass
|
||||
) RETURNING placementid;
|
||||
placementid
|
||||
placementid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
TRUNCATE researchers;
|
||||
-- fail at release
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^RELEASE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -329,7 +329,7 @@ ERROR: connection not open
|
|||
COMMIT;
|
||||
-- should see correct results from healthy placement and one bad placement
|
||||
SELECT * FROM researchers WHERE lab_id = 4;
|
||||
id | lab_id | name
|
||||
id | lab_id | name
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -337,16 +337,16 @@ UPDATE pg_dist_shard_placement SET shardstate = 1
|
|||
WHERE shardstate = 3 AND shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'researchers'::regclass
|
||||
) RETURNING placementid;
|
||||
placementid
|
||||
placementid
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
TRUNCATE researchers;
|
||||
-- clean up
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE artists;
|
||||
|
|
|
@ -1,18 +1,18 @@
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

-- add the workers
SELECT master_add_node('localhost', :worker_1_port);
master_add_node
master_add_node
---------------------------------------------------------------------
1
(1 row)

SELECT master_add_node('localhost', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker
master_add_node
master_add_node
---------------------------------------------------------------------
2
(1 row)

@ -1,29 +1,29 @@
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

SELECT citus.clear_network_traffic();
clear_network_traffic
clear_network_traffic
---------------------------------------------------------------------


(1 row)

SET citus.shard_count = 2;
SET citus.shard_replication_factor = 2;
CREATE TABLE mod_test (key int, value text);
SELECT create_distributed_table('mod_test', 'key');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

-- verify behavior of single INSERT; should mark shard as failed
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
mitmproxy
mitmproxy
---------------------------------------------------------------------


(1 row)

INSERT INTO mod_test VALUES (2, 6);
@ -32,7 +32,7 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT COUNT(*) FROM mod_test WHERE key=2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
@ -42,7 +42,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 1
|
|||
WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
|
||||
) AND shardstate = 3 RETURNING placementid;
|
||||
placementid
|
||||
placementid
|
||||
---------------------------------------------------------------------
|
||||
125
|
||||
(1 row)
|
||||
|
@ -50,16 +50,16 @@ WHERE shardid IN (
|
|||
TRUNCATE mod_test;
|
||||
-- verify behavior of UPDATE ... RETURNING; should mark as failed
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO mod_test VALUES (2, 6);
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key;
|
||||
|
@ -67,13 +67,13 @@ WARNING: connection error: localhost:xxxxx
|
|||
DETAIL: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
key
|
||||
key
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT COUNT(*) FROM mod_test WHERE value='ok';
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
@ -83,7 +83,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 1
|
|||
WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
|
||||
) AND shardstate = 3 RETURNING placementid;
|
||||
placementid
|
||||
placementid
|
||||
---------------------------------------------------------------------
|
||||
125
|
||||
(1 row)
|
||||
|
@ -92,9 +92,9 @@ TRUNCATE mod_test;
|
|||
-- verify behavior of multi-statement modifications to a single shard
|
||||
-- should succeed but mark a placement as failed
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -108,7 +108,7 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
COMMIT;
|
||||
SELECT COUNT(*) FROM mod_test WHERE key=2;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
@ -118,7 +118,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 1
|
|||
WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'mod_test'::regclass
|
||||
) AND shardstate = 3 RETURNING placementid;
|
||||
placementid
|
||||
placementid
|
||||
---------------------------------------------------------------------
|
||||
125
|
||||
(1 row)
|
||||
|
|
|
@ -1,30 +1,30 @@
|
|||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.clear_network_traffic();
|
||||
clear_network_traffic
|
||||
clear_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_count = 2;
|
||||
SET citus.shard_replication_factor = 2;
|
||||
CREATE TABLE select_test (key int, value text);
|
||||
SELECT create_distributed_table('select_test', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- put data in shard for which mitm node is first placement
|
||||
INSERT INTO select_test VALUES (3, 'test data');
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM select_test WHERE key = 3;
|
||||
|
@ -32,7 +32,7 @@ WARNING: connection error: localhost:xxxxx
|
|||
DETAIL: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
3 | test data
|
||||
(1 row)
|
||||
|
@ -42,16 +42,16 @@ WARNING: connection error: localhost:xxxxx
|
|||
DETAIL: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
3 | test data
|
||||
(1 row)
|
||||
|
||||
-- kill after first SELECT; txn should work (though placement marked bad)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -61,7 +61,7 @@ WARNING: connection error: localhost:xxxxx
|
|||
DETAIL: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
3 | test data
|
||||
3 | more data
|
||||
|
@ -73,7 +73,7 @@ WARNING: connection error: localhost:xxxxx
|
|||
DETAIL: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
3 | test data
|
||||
3 | more data
|
||||
|
@ -91,9 +91,9 @@ TRUNCATE select_test;
|
|||
-- put data in shard for which mitm node is first placement
|
||||
INSERT INTO select_test VALUES (3, 'test data');
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM select_test WHERE key = 3;
|
||||
|
@ -102,9 +102,9 @@ SELECT * FROM select_test WHERE key = 3;
|
|||
ERROR: canceling statement due to user request
|
||||
-- cancel after first SELECT; txn should fail and nothing should be marked as invalid
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
|
@ -117,7 +117,7 @@ SELECT DISTINCT shardstate FROM pg_dist_shard_placement
|
|||
WHERE shardid IN (
|
||||
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'select_test'::regclass
|
||||
);
|
||||
shardstate
|
||||
shardstate
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
@ -126,15 +126,15 @@ TRUNCATE select_test;
|
|||
-- cancel the second query
|
||||
-- error after second SELECT; txn should fail
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
INSERT INTO select_test VALUES (3, 'more data');
|
||||
SELECT * FROM select_test WHERE key = 3;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
3 | more data
|
||||
(1 row)
|
||||
|
@ -145,15 +145,15 @@ ERROR: canceling statement due to user request
|
|||
COMMIT;
|
||||
-- error after second SELECT; txn should work (though placement marked bad)
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).reset()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
INSERT INTO select_test VALUES (3, 'more data');
|
||||
SELECT * FROM select_test WHERE key = 3;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
3 | more data
|
||||
(1 row)
|
||||
|
@ -164,7 +164,7 @@ WARNING: connection error: localhost:xxxxx
|
|||
DETAIL: server closed the connection unexpectedly
|
||||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
3 | more data
|
||||
3 | even more data
|
||||
|
@ -172,13 +172,13 @@ DETAIL: server closed the connection unexpectedly
|
|||
|
||||
COMMIT;
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(2).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT recover_prepared_transactions();
|
||||
recover_prepared_transactions
|
||||
recover_prepared_transactions
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -195,21 +195,21 @@ SET citus.shard_count = 2;
|
|||
SET citus.shard_replication_factor = 1;
|
||||
CREATE TABLE select_test (key int, value text);
|
||||
SELECT create_distributed_table('select_test', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.max_cached_conns_per_worker TO 1; -- allow connection to be cached
|
||||
INSERT INTO select_test VALUES (1, 'test data');
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM select_test WHERE key = 1;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
1 | test data
|
||||
(1 row)
|
||||
|
@ -221,13 +221,13 @@ DETAIL: server closed the connection unexpectedly
|
|||
before or while processing the request.
|
||||
-- now the same test with query cancellation
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").after(1).cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM select_test WHERE key = 1;
|
||||
key | value
|
||||
key | value
|
||||
---------------------------------------------------------------------
|
||||
1 | test data
|
||||
(1 row)
|
||||
|
|
|
@ -5,7 +5,7 @@ ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1;
ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
ALTER SYSTEM set citus.enable_statistics_collection TO false;
SELECT pg_reload_conf();
pg_reload_conf
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)

File diff suppressed because it is too large
@ -3,9 +3,9 @@
|
|||
-- get WARNINGs instead of ERRORs.
|
||||
SET citus.next_shard_id TO 12000000;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_count = 1;
|
||||
|
@ -13,21 +13,21 @@ SET citus.shard_replication_factor = 2; -- one shard per worker
|
|||
SET citus.multi_shard_commit_protocol TO '1pc';
|
||||
CREATE TABLE vacuum_test (key int, value int);
|
||||
SELECT create_distributed_table('vacuum_test', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.clear_network_traffic();
|
||||
clear_network_traffic
|
||||
clear_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test;
|
||||
|
@ -36,9 +36,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
|
@ -47,17 +47,17 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
-- ANALYZE transactions being critical is an open question, see #2430
|
||||
-- show that we marked as INVALID on COMMIT FAILURE
|
||||
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
|
||||
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
|
||||
shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass);
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
12000000 | 3
|
||||
(1 row)
|
||||
|
@ -68,46 +68,46 @@ WHERE shardid IN (
|
|||
);
|
||||
-- the same tests with cancel
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test;
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
ERROR: canceling statement due to user request
|
||||
-- cancel during COMMIT should be ignored
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE other_vacuum_test (key int, value int);
|
||||
SELECT create_distributed_table('other_vacuum_test', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test, other_vacuum_test;
|
||||
|
@ -116,18 +116,18 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test, other_vacuum_test;
|
||||
ERROR: canceling statement due to user request
|
||||
-- ==== Clean up, we're done here ====
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE vacuum_test, other_vacuum_test;
|
||||
|
|
|
@ -3,9 +3,9 @@
|
|||
-- get WARNINGs instead of ERRORs.
|
||||
SET citus.next_shard_id TO 12000000;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_count = 1;
|
||||
|
@ -13,21 +13,21 @@ SET citus.shard_replication_factor = 2; -- one shard per worker
|
|||
SET citus.multi_shard_commit_protocol TO '1pc';
|
||||
CREATE TABLE vacuum_test (key int, value int);
|
||||
SELECT create_distributed_table('vacuum_test', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.clear_network_traffic();
|
||||
clear_network_traffic
|
||||
clear_network_traffic
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test;
|
||||
|
@ -36,9 +36,9 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
|
@ -47,17 +47,17 @@ DETAIL: server closed the connection unexpectedly
|
|||
This probably means the server terminated abnormally
|
||||
before or while processing the request.
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
-- ANALYZE transactions being critical is an open question, see #2430
|
||||
-- show that we marked as INVALID on COMMIT FAILURE
|
||||
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
|
||||
SELECT shardid, shardstate FROM pg_dist_shard_placement where shardstate != 1 AND
|
||||
shardid in ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'vacuum_test'::regclass);
|
||||
shardid | shardstate
|
||||
shardid | shardstate
|
||||
---------------------------------------------------------------------
|
||||
12000000 | 3
|
||||
(1 row)
|
||||
|
@ -68,63 +68,63 @@ WHERE shardid IN (
|
|||
);
|
||||
-- the same tests with cancel
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test;
|
||||
ERROR: canceling statement due to user request
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
ERROR: canceling statement due to user request
|
||||
-- cancel during COMMIT should be ignored
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ANALYZE vacuum_test;
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE other_vacuum_test (key int, value int);
|
||||
SELECT create_distributed_table('other_vacuum_test', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test, other_vacuum_test;
|
||||
ERROR: syntax error at or near ","
|
||||
SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
VACUUM vacuum_test, other_vacuum_test;
|
||||
ERROR: syntax error at or near ","
|
||||
-- ==== Clean up, we're done here ====
|
||||
SELECT citus.mitmproxy('conn.allow()');
|
||||
mitmproxy
|
||||
mitmproxy
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
DROP TABLE vacuum_test, other_vacuum_test;
|
||||
|
|
|
@ -2,31 +2,31 @@ CREATE SCHEMA fast_path_router_modify;
|
|||
SET search_path TO fast_path_router_modify;
|
||||
SET citus.next_shard_id TO 1840000;
|
||||
-- all the tests in this file is intended for testing fast-path
|
||||
-- router planner, so we're explicitly enabling itin this file.
|
||||
-- We've bunch of other tests that triggers non-fast-path-router
|
||||
-- router planner, so we're explicitly enabling itin this file.
|
||||
-- We've bunch of other tests that triggers non-fast-path-router
|
||||
-- planner (note this is already true by default)
|
||||
SET citus.enable_fast_path_router_planner TO true;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE modify_fast_path(key int, value_1 int, value_2 text);
|
||||
SELECT create_distributed_table('modify_fast_path', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE modify_fast_path_replication_2(key int, value_1 int, value_2 text);
|
||||
SELECT create_distributed_table('modify_fast_path_replication_2', 'key');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE modify_fast_path_reference(key int, value_1 int, value_2 text);
|
||||
SELECT create_reference_table('modify_fast_path_reference');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
-- show the output
|
||||
|
@ -110,7 +110,7 @@ DELETE FROM modify_fast_path WHERE key = 1 RETURNING *;
|
|||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
DETAIL: distribution column value: 1
|
||||
key | value_1 | value_2
|
||||
key | value_1 | value_2
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -127,7 +127,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
|
|||
DEBUG: Plan 18 query after replacing subqueries and CTEs: SELECT key, value_1, value_2 FROM (SELECT intermediate_result.key, intermediate_result.value_1, intermediate_result.value_2 FROM read_intermediate_result('18_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value_1 integer, value_2 text)) t2
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
key | value_1 | value_2
|
||||
key | value_1 | value_2
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -137,7 +137,7 @@ DEBUG: Distributed planning for a fast-path router query
|
|||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
DETAIL: distribution column value: 1
|
||||
key | value_1 | value_2
|
||||
key | value_1 | value_2
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -146,7 +146,7 @@ DEBUG: Distributed planning for a fast-path router query
|
|||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
DETAIL: distribution column value: 1
|
||||
key | value_1 | value_2
|
||||
key | value_1 | value_2
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -154,7 +154,7 @@ SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR UPDATE;
|
|||
DEBUG: Distributed planning for a fast-path router query
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
key | value_1 | value_2
|
||||
key | value_1 | value_2
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -162,7 +162,7 @@ SELECT * FROM modify_fast_path_reference WHERE key = 1 FOR SHARE;
|
|||
DEBUG: Distributed planning for a fast-path router query
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
key | value_1 | value_2
|
||||
key | value_1 | value_2
|
||||
---------------------------------------------------------------------
|
||||
(0 rows)
|
||||
|
||||
|
@ -193,12 +193,12 @@ DEBUG: Distributed planning for a fast-path router query
|
|||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
-- joins are not supported via fast-path
|
||||
UPDATE modify_fast_path
|
||||
SET value_1 = 1
|
||||
FROM modify_fast_path_reference
|
||||
WHERE
|
||||
modify_fast_path.key = modify_fast_path_reference.key AND
|
||||
modify_fast_path.key = 1 AND
|
||||
UPDATE modify_fast_path
|
||||
SET value_1 = 1
|
||||
FROM modify_fast_path_reference
|
||||
WHERE
|
||||
modify_fast_path.key = modify_fast_path_reference.key AND
|
||||
modify_fast_path.key = 1 AND
|
||||
modify_fast_path_reference.key = 1;
|
||||
DEBUG: Creating router plan
|
||||
DEBUG: Plan is router executable
|
||||
|
@ -252,9 +252,9 @@ DEBUG: Plan is router executable
|
|||
DETAIL: distribution column value: 1
|
||||
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
|
||||
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement
|
||||
modify_fast_path_plpsql
|
||||
modify_fast_path_plpsql
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT modify_fast_path_plpsql(2,2);
|
||||
|
@ -268,9 +268,9 @@ DEBUG: Plan is router executable
|
|||
DETAIL: distribution column value: 2
|
||||
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
|
||||
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement
|
||||
modify_fast_path_plpsql
|
||||
modify_fast_path_plpsql
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT modify_fast_path_plpsql(3,3);
|
||||
|
@ -284,9 +284,9 @@ DEBUG: Plan is router executable
|
|||
DETAIL: distribution column value: 3
|
||||
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
|
||||
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement
|
||||
modify_fast_path_plpsql
|
||||
modify_fast_path_plpsql
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT modify_fast_path_plpsql(4,4);
|
||||
|
@ -300,9 +300,9 @@ DEBUG: Plan is router executable
|
|||
DETAIL: distribution column value: 4
|
||||
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
|
||||
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement
|
||||
modify_fast_path_plpsql
|
||||
modify_fast_path_plpsql
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT modify_fast_path_plpsql(5,5);
|
||||
|
@ -316,9 +316,9 @@ DEBUG: Plan is router executable
|
|||
DETAIL: distribution column value: 5
|
||||
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
|
||||
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement
|
||||
modify_fast_path_plpsql
|
||||
modify_fast_path_plpsql
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT modify_fast_path_plpsql(6,6);
|
||||
|
@ -335,9 +335,9 @@ DEBUG: Plan is router executable
|
|||
DETAIL: distribution column value: 6
|
||||
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
|
||||
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement
|
||||
modify_fast_path_plpsql
|
||||
modify_fast_path_plpsql
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT modify_fast_path_plpsql(6,6);
|
||||
|
@ -351,9 +351,9 @@ DEBUG: Plan is router executable
|
|||
DETAIL: distribution column value: 6
|
||||
CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2"
|
||||
PL/pgSQL function modify_fast_path_plpsql(integer,integer) line 3 at SQL statement
|
||||
modify_fast_path_plpsql
|
||||
modify_fast_path_plpsql
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
RESET client_min_messages;
|
||||
|
|
|
@ -10,30 +10,30 @@ SET citus.next_placement_id TO 2380000;
|
|||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE transitive_reference_table(id int PRIMARY KEY);
|
||||
SELECT create_reference_table('transitive_reference_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE reference_table(id int PRIMARY KEY, value_1 int);
|
||||
SELECT create_reference_table('reference_table');
|
||||
create_reference_table
|
||||
create_reference_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE on_update_fkey_table(id int PRIMARY KEY, value_1 int);
|
||||
SELECT create_distributed_table('on_update_fkey_table', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE unrelated_dist_table(id int PRIMARY KEY, value_1 int);
|
||||
SELECT create_distributed_table('unrelated_dist_table', 'id');
|
||||
create_distributed_table
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
ALTER TABLE on_update_fkey_table ADD CONSTRAINT fkey FOREIGN KEY(value_1) REFERENCES reference_table(id) ON UPDATE CASCADE;
|
||||
|
@ -47,13 +47,13 @@ SET client_min_messages TO DEBUG1;
|
|||
-- case 1.1: SELECT to a reference table is followed by a parallel SELECT to a distributed table
|
||||
BEGIN;
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1001
|
||||
(1 row)
|
||||
|
@ -61,13 +61,13 @@ BEGIN;
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1001
|
||||
(1 row)
|
||||
|
@ -76,31 +76,31 @@ ROLLBACK;
|
|||
-- case 1.2: SELECT to a reference table is followed by a multiple router SELECTs to a distributed table
|
||||
BEGIN;
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 15;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 16;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 17;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 18;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
@ -108,31 +108,31 @@ BEGIN;
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 15;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 16;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 17;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM on_update_fkey_table WHERE id = 18;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1
|
||||
(1 row)
|
||||
|
@ -141,7 +141,7 @@ ROLLBACK;
|
|||
-- case 1.3: SELECT to a reference table is followed by a multi-shard UPDATE to a distributed table
|
||||
BEGIN;
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -150,7 +150,7 @@ BEGIN;
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -160,7 +160,7 @@ ROLLBACK;
|
|||
-- case 1.4: SELECT to a reference table is followed by a multiple sing-shard UPDATE to a distributed table
|
||||
BEGIN;
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -172,7 +172,7 @@ BEGIN;
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -185,7 +185,7 @@ ROLLBACK;
|
|||
-- case 1.5: SELECT to a reference table is followed by a DDL that touches fkey column
|
||||
BEGIN;
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -197,7 +197,7 @@ DEBUG: validating foreign key constraint "fkey"
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -210,7 +210,7 @@ ROLLBACK;
|
|||
-- case 1.6: SELECT to a reference table is followed by an unrelated DDL
|
||||
BEGIN;
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -221,7 +221,7 @@ DETAIL: cannot execute parallel DDL on relation "on_update_fkey_table" after SE
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -234,7 +234,7 @@ ROLLBACK;
|
|||
-- the foreign key column
|
||||
BEGIN;
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -245,7 +245,7 @@ BEGIN;
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -258,13 +258,13 @@ ROLLBACK;
|
|||
-- the foreign key column after a parallel query has been executed
|
||||
BEGIN;
|
||||
SELECT count(*) FROM unrelated_dist_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -276,13 +276,13 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
|
|||
ROLLBACK;
|
||||
BEGIN;
|
||||
SELECT count(*) FROM unrelated_dist_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM transitive_reference_table;
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
101
|
||||
(1 row)
|
||||
|
@ -296,13 +296,13 @@ ROLLBACK;
|
|||
-- the foreign key column, and a parallel query has already been executed
|
||||
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
count
count
---------------------------------------------------------------------
1001
(1 row)

SELECT count(*) FROM reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -314,13 +314,13 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
ROLLBACK;
BEGIN;
SELECT count(*) FROM unrelated_dist_table;
count
count
---------------------------------------------------------------------
1001
(1 row)

SELECT count(*) FROM transitive_reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -333,7 +333,7 @@ ROLLBACK;
-- case 1.8: SELECT to a reference table is followed by a COPY
BEGIN;
SELECT count(*) FROM reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -342,7 +342,7 @@ BEGIN;
ROLLBACK;
BEGIN;
SELECT count(*) FROM transitive_reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -355,13 +355,13 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
0
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -372,13 +372,13 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101;
count
count
---------------------------------------------------------------------
0
(1 row)
@ -390,25 +390,25 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99;
count
count
---------------------------------------------------------------------
1
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199;
count
count
---------------------------------------------------------------------
1
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299;
count
count
---------------------------------------------------------------------
1
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399;
count
count
---------------------------------------------------------------------
1
(1 row)
@ -419,25 +419,25 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 99;
count
count
---------------------------------------------------------------------
0
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 199;
count
count
---------------------------------------------------------------------
0
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 299;
count
count
---------------------------------------------------------------------
0
(1 row)

SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 101 AND id = 399;
count
count
---------------------------------------------------------------------
0
(1 row)
@ -549,7 +549,7 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
SELECT count(*) FROM on_update_fkey_table;
count
count
---------------------------------------------------------------------
1001
(1 row)
@ -560,7 +560,7 @@ BEGIN;
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "transitive_reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
SELECT count(*) FROM on_update_fkey_table;
count
count
---------------------------------------------------------------------
1001
(1 row)
@ -570,7 +570,7 @@ ROLLBACK;
BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE int;
SELECT count(*) FROM on_update_fkey_table;
count
count
---------------------------------------------------------------------
1001
(1 row)
@ -579,7 +579,7 @@ ROLLBACK;
BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE int;
SELECT count(*) FROM on_update_fkey_table;
count
count
---------------------------------------------------------------------
1001
(1 row)
@ -706,13 +706,13 @@ ROLLBACK;
-- case 4.1: SELECT to a dist table is follwed by a SELECT to a reference table
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)

SELECT count(*) FROM reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -720,13 +720,13 @@ BEGIN;
ROLLBACK;
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)

SELECT count(*) FROM transitive_reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -735,7 +735,7 @@ ROLLBACK;
-- case 4.2: SELECT to a dist table is follwed by a DML to a reference table
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -747,7 +747,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
ROLLBACK;
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -760,7 +760,7 @@ ROLLBACK;
-- case 4.3: SELECT to a dist table is follwed by an unrelated DDL to a reference table
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -771,7 +771,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
ROLLBACK;
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -783,7 +783,7 @@ ROLLBACK;
-- case 4.4: SELECT to a dist table is follwed by a DDL to a reference table
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -797,7 +797,7 @@ HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_m
ROLLBACK;
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -814,7 +814,7 @@ ROLLBACK;
SET client_min_messages to LOG;
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -825,7 +825,7 @@ ERROR: cannot execute DDL on reference relation "reference_table" because there
ROLLBACK;
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE value_1 = 99;
count
count
---------------------------------------------------------------------
10
(1 row)
@ -838,7 +838,7 @@ ROLLBACK;
-- case 4.6: Router SELECT to a dist table is followed by a TRUNCATE
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE id = 9;
count
count
---------------------------------------------------------------------
1
(1 row)
@ -848,7 +848,7 @@ NOTICE: truncate cascades to table "on_update_fkey_table"
ROLLBACK;
BEGIN;
SELECT count(*) FROM on_update_fkey_table WHERE id = 9;
count
count
---------------------------------------------------------------------
1
(1 row)
@ -863,7 +863,7 @@ RESET client_min_messages;
BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
SELECT count(*) FROM reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -872,7 +872,7 @@ ROLLBACK;
BEGIN;
UPDATE on_update_fkey_table SET value_1 = 16 WHERE value_1 = 15;
SELECT count(*) FROM transitive_reference_table;
count
count
---------------------------------------------------------------------
101
(1 row)
@ -1033,16 +1033,16 @@ ROLLBACK;
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

-- make sure that the output isn't too verbose
@ -1054,16 +1054,16 @@ ROLLBACK;
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
SELECT create_distributed_table('tt4', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
@ -1083,23 +1083,23 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE tt4(id int PRIMARY KEY, value_1 int, FOREIGN KEY(id) REFERENCES tt4(id));
SELECT create_distributed_table('tt4', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id), FOREIGN KEY(id) REFERENCES tt4(id));
SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

-- make sure that the output isn't too verbose
@ -1112,16 +1112,16 @@ ROLLBACK;
BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
@ -1139,16 +1139,16 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
@ -1162,16 +1162,16 @@ COMMIT;
BEGIN;
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
@ -1189,16 +1189,16 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

ALTER TABLE test_table_2 ADD CONSTRAINT c_check FOREIGN KEY (value_1) REFERENCES test_table_1(id);
@ -1211,7 +1211,7 @@ ROLLBACK;
-- setting the mode to sequential should fail
BEGIN;
SELECT count(*) FROM on_update_fkey_table;
count
count
---------------------------------------------------------------------
1001
(1 row)
@ -1243,9 +1243,9 @@ BEGIN;
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1');
NOTICE: Copying data from local table...
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test_table_2', 'id');
@ -1267,9 +1267,9 @@ BEGIN;
INSERT INTO test_table_2 SELECT i, i FROM generate_series(0,100) i;
SELECT create_reference_table('test_table_1');
NOTICE: Copying data from local table...
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test_table_2', 'id');
@ -1287,28 +1287,28 @@ BEGIN;
CREATE TABLE test_table_1(id int PRIMARY KEY);
CREATE TABLE test_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_table_1(id));
SELECT create_reference_table('test_table_1');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

-- and maybe some other test
CREATE INDEX i1 ON test_table_1(id);
ALTER TABLE test_table_2 ADD CONSTRAINT check_val CHECK (id > 0);
SELECT count(*) FROM test_table_2;
count
count
---------------------------------------------------------------------
0
(1 row)

SELECT count(*) FROM test_table_1;
count
count
---------------------------------------------------------------------
0
(1 row)
@ -1327,18 +1327,18 @@ CREATE TABLE reference_table(id int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table"
DEBUG: building index "reference_table_pkey" on table "reference_table" serially
SELECT create_reference_table('reference_table');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE distributed_table(id int PRIMARY KEY, value_1 int);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "distributed_table_pkey" for table "distributed_table"
DEBUG: building index "distributed_table_pkey" on table "distributed_table" serially
SELECT create_distributed_table('distributed_table', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

ALTER TABLE
@ -1363,7 +1363,7 @@ DEBUG: generating subplan 170_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.
DEBUG: Plan 170 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.distributed_table USING (SELECT intermediate_result.id FROM read_intermediate_result('170_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) RETURNING distributed_table.id, distributed_table.value_1, t1.id
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
id | value_1 | id
id | value_1 | id
---------------------------------------------------------------------
(0 rows)

@ -1384,7 +1384,7 @@ DEBUG: generating subplan 174_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.
DEBUG: Plan 174 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('174_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1 WHERE (distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id)
DEBUG: switching to sequential query execution mode
DETAIL: Reference relation "reference_table" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed relations due to foreign keys. Any parallel modification to those hash distributed relations in the same transaction can only be executed in sequential query execution mode
count
count
---------------------------------------------------------------------
0
(1 row)
@ -1417,7 +1417,7 @@ BEGIN;
DEBUG: generating subplan 181_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
DEBUG: generating subplan 181_2 for CTE t2: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
DEBUG: Plan 181 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM test_fkey_to_ref_in_tx.distributed_table, (SELECT intermediate_result.id FROM read_intermediate_result('181_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t1, (SELECT intermediate_result.id FROM read_intermediate_result('181_2'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) t2 WHERE ((distributed_table.value_1 OPERATOR(pg_catalog.=) t1.id) AND (distributed_table.value_1 OPERATOR(pg_catalog.=) t2.id))
count
count
---------------------------------------------------------------------
0
(1 row)
@ -1429,7 +1429,7 @@ BEGIN;
DELETE FROM reference_table RETURNING id;
DEBUG: generating subplan 184_1 for CTE t1: DELETE FROM test_fkey_to_ref_in_tx.distributed_table RETURNING id
DEBUG: Plan 184 query after replacing subqueries and CTEs: DELETE FROM test_fkey_to_ref_in_tx.reference_table RETURNING id
id
id
---------------------------------------------------------------------
(0 rows)

File diff suppressed because it is too large
@ -8,21 +8,21 @@ CREATE TABLE test_table_1(id int, val1 int);
CREATE TABLE test_table_2(id bigint, val1 int);
CREATE TABLE test_table_3(id int, val1 bigint);
SELECT create_distributed_table('test_table_1', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test_table_3', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

INSERT INTO test_table_1 VALUES(1,1),(2,2),(3,3);
@ -30,7 +30,7 @@ INSERT INTO test_table_2 VALUES(2,2),(3,3),(4,4);
INSERT INTO test_table_3 VALUES(1,1),(3,3),(4,5);
-- Simple full outer join
SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1;
id
id
---------------------------------------------------------------------
1
2
@ -40,10 +40,10 @@ SELECT id FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1;

-- Get all columns as the result of the full join
SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1;
id | val1 | val1
id | val1 | val1
---------------------------------------------------------------------
1 | 1 | 1
2 | 2 |
2 | 2 |
3 | 3 | 3
4 | | 5
(4 rows)
@ -55,13 +55,13 @@ SELECT * FROM
(SELECT test_table_1.id FROM test_table_1 FULL JOIN test_table_3 using(id)) as j2
USING(id)
ORDER BY 1;
id
id
---------------------------------------------------------------------
1
2
3




(5 rows)

-- Join subqueries using multiple columns
@ -71,18 +71,18 @@ SELECT * FROM
(SELECT test_table_1.id, test_table_1.val1 FROM test_table_1 FULL JOIN test_table_3 using(id)) as j2
USING(id, val1)
ORDER BY 1;
id | val1
id | val1
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
|
|
|
|
(5 rows)

-- Full join using multiple columns
SELECT * FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) ORDER BY 1;
id | val1
id | val1
---------------------------------------------------------------------
1 | 1
2 | 2
@ -97,7 +97,7 @@ WHERE id::bigint < 55
GROUP BY id
ORDER BY 2
ASC LIMIT 3;
count | avg_value | not_null
count | avg_value | not_null
---------------------------------------------------------------------
1 | 2 | t
1 | 6 | t
@ -108,7 +108,7 @@ SELECT max(val1)
FROM test_table_1 FULL JOIN test_table_3 USING(id, val1)
GROUP BY test_table_1.id
ORDER BY 1;
max
max
---------------------------------------------------------------------
1
2
@ -121,7 +121,7 @@ SELECT max(val1)
FROM test_table_1 LEFT JOIN test_table_3 USING(id, val1)
GROUP BY test_table_1.id
ORDER BY 1;
max
max
---------------------------------------------------------------------
1
2
@ -138,36 +138,36 @@ INSERT INTO test_table_2 VALUES(7, NULL);
INSERT INTO test_table_3 VALUES(7, NULL);
-- Get all columns as the result of the full join
SELECT * FROM test_table_1 FULL JOIN test_table_3 using(id) ORDER BY 1;
id | val1 | val1
id | val1 | val1
---------------------------------------------------------------------
1 | 1 | 1
2 | 2 |
2 | 2 |
3 | 3 | 3
4 | | 5
7 | |
7 | |
(5 rows)

-- Get the same result (with multiple id)
SELECT * FROM test_table_1 FULL JOIN test_table_3 ON (test_table_1.id = test_table_3.id) ORDER BY 1;
id | val1 | id | val1
id | val1 | id | val1
---------------------------------------------------------------------
1 | 1 | 1 | 1
2 | 2 | |
2 | 2 | |
3 | 3 | 3 | 3
7 | | 7 |
7 | | 7 |
| | 4 | 5
(5 rows)

-- Full join using multiple columns
SELECT * FROM test_table_1 FULL JOIN test_table_3 USING(id, val1) ORDER BY 1;
id | val1
id | val1
---------------------------------------------------------------------
1 | 1
2 | 2
3 | 3
4 | 5
7 |
7 |
7 |
7 |
(6 rows)

-- In order to make the same test with different data types use text-varchar pair
@ -178,22 +178,22 @@ DROP TABLE test_table_3;
CREATE TABLE test_table_1(id int, val1 text);
CREATE TABLE test_table_2(id int, val1 varchar(30));
SELECT create_distributed_table('test_table_1', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

SELECT create_distributed_table('test_table_2', 'id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

INSERT INTO test_table_1 VALUES(1,'val_1'),(2,'val_2'),(3,'val_3'), (4, NULL);
INSERT INTO test_table_2 VALUES(2,'val_2'),(3,'val_3'),(4,'val_4'), (5, NULL);
-- Simple full outer join
SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1;
id
id
---------------------------------------------------------------------
1
2
@ -204,13 +204,13 @@ SELECT id FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1;

-- Get all columns as the result of the full join
SELECT * FROM test_table_1 FULL JOIN test_table_2 using(id) ORDER BY 1;
id | val1 | val1
id | val1 | val1
---------------------------------------------------------------------
1 | val_1 |
1 | val_1 |
2 | val_2 | val_2
3 | val_3 | val_3
4 | | val_4
5 | |
5 | |
(5 rows)

-- Join subqueries using multiple columns
@ -220,28 +220,28 @@ SELECT * FROM
(SELECT test_table_2.id, test_table_2.val1 FROM test_table_1 FULL JOIN test_table_2 using(id)) as j2
USING(id, val1)
ORDER BY 1,2;
id | val1
id | val1
---------------------------------------------------------------------
1 | val_1
2 | val_2
3 | val_3
4 | val_4
4 |
5 |
|
|
4 |
5 |
|
|
(8 rows)

-- Full join using multiple columns
SELECT * FROM test_table_1 FULL JOIN test_table_2 USING(id, val1) ORDER BY 1,2;
id | val1
id | val1
---------------------------------------------------------------------
1 | val_1
2 | val_2
3 | val_3
4 | val_4
4 |
5 |
4 |
5 |
(6 rows)

DROP SCHEMA full_join CASCADE;
@ -6,30 +6,30 @@ SET citus.next_shard_id TO 1480000;
SET citus.shard_replication_factor = 1;
CREATE TABLE table_1 (key int, value text);
SELECT create_distributed_table('table_1', 'key');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE table_2 (key int, value text);
SELECT create_distributed_table('table_2', 'key');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE table_3 (key int, value text);
SELECT create_distributed_table('table_3', 'key');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

CREATE TABLE ref_table (key int, value text);
SELECT create_reference_table('ref_table');
create_reference_table
create_reference_table
---------------------------------------------------------------------


(1 row)

-- load some data
@ -51,7 +51,7 @@ DEBUG: generating subplan 5_1 for CTE some_values_1: SELECT key FROM intermedia
DEBUG: Plan 5 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('5_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Subplan 5_1 will be sent to localhost:xxxxx
DEBUG: Subplan 5_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
2
(1 row)
@ -68,7 +68,7 @@ FROM
DEBUG: generating subplan 7_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Subplan 7_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -86,7 +86,7 @@ DEBUG: generating subplan 9_1 for CTE some_values_1: SELECT key, random() AS ra
DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key))
DEBUG: Subplan 9_1 will be sent to localhost:xxxxx
DEBUG: Subplan 9_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
2
(1 row)
@ -106,7 +106,7 @@ DEBUG: generating subplan 11_2 for CTE some_values_2: SELECT key, random() AS r
DEBUG: Plan 11 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('11_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Subplan 11_1 will be sent to localhost:xxxxx
DEBUG: Subplan 11_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -127,7 +127,7 @@ DEBUG: Plan 14 query after replacing subqueries and CTEs: SELECT count(*) AS co
DEBUG: Subplan 14_1 will be sent to localhost:xxxxx
DEBUG: Subplan 14_1 will be sent to localhost:xxxxx
DEBUG: Subplan 14_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
1
(1 row)
@ -149,7 +149,7 @@ DEBUG: Plan 17 query after replacing subqueries and CTEs: SELECT count(*) AS co
DEBUG: Subplan 17_1 will be sent to localhost:xxxxx
DEBUG: Subplan 17_1 will be sent to localhost:xxxxx
DEBUG: Subplan 17_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
1
(1 row)
@ -171,7 +171,7 @@ DEBUG: Plan 20 query after replacing subqueries and CTEs: SELECT count(*) AS co
DEBUG: Subplan 20_1 will be sent to localhost:xxxxx
DEBUG: Subplan 20_1 will be sent to localhost:xxxxx
DEBUG: Subplan 20_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -193,7 +193,7 @@ DEBUG: generating subplan 23_2 for CTE some_values_2: SELECT some_values_1.key,
DEBUG: Plan 23 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('23_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Subplan 23_1 will be sent to localhost:xxxxx
DEBUG: Subplan 23_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -214,7 +214,7 @@ DEBUG: Subplan 26_1 will be sent to localhost:xxxxx
DEBUG: Subplan 26_1 will be sent to localhost:xxxxx
DEBUG: Subplan 26_2 will be sent to localhost:xxxxx
DEBUG: Subplan 26_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
1
(1 row)
@ -237,7 +237,7 @@ DEBUG: Subplan 29_1 will be sent to localhost:xxxxx
DEBUG: Subplan 29_1 will be sent to localhost:xxxxx
DEBUG: Subplan 29_2 will be sent to localhost:xxxxx
DEBUG: Subplan 29_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -255,7 +255,7 @@ DEBUG: generating subplan 32_1 for CTE some_values_1: SELECT key, random() AS r
DEBUG: Plan 32 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('32_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Subplan 32_1 will be sent to localhost:xxxxx
DEBUG: Subplan 32_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
2
(1 row)
@ -268,7 +268,7 @@ SELECT
count(*)
FROM
(some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key) WHERE table_2.key = 1;
count
count
---------------------------------------------------------------------
0
(1 row)
@ -290,7 +290,7 @@ DEBUG: generating subplan 35_2 for CTE some_values_2: SELECT some_values_1.key,
DEBUG: Plan 35 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('35_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
DEBUG: Subplan 35_1 will be sent to localhost:xxxxx
DEBUG: Subplan 35_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -320,7 +320,7 @@ DEBUG: Subplan 38_1 will be sent to localhost:xxxxx
DEBUG: Subplan 38_1 will be sent to localhost:xxxxx
DEBUG: Subplan 39_1 will be sent to localhost:xxxxx
DEBUG: Subplan 39_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -349,7 +349,7 @@ DEBUG: Plan 42 query after replacing subqueries and CTEs: SELECT count(*) AS co
DEBUG: Subplan 42_1 will be sent to localhost:xxxxx
DEBUG: Subplan 43_1 will be sent to localhost:xxxxx
DEBUG: Subplan 43_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -373,7 +373,7 @@ DEBUG: Subplan 46_2 will be sent to localhost:xxxxx
DEBUG: Subplan 46_2 will be sent to localhost:xxxxx
DEBUG: Subplan 46_3 will be sent to localhost:xxxxx
DEBUG: Subplan 46_3 will be sent to localhost:xxxxx
key | key | value
key | key | value
---------------------------------------------------------------------
(0 rows)

@ -389,7 +389,7 @@ DEBUG: generating subplan 50_2 for CTE some_values_2: SELECT key, random() AS r
DEBUG: Plan 50 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('50_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key))
DEBUG: Subplan 50_1 will be sent to localhost:xxxxx
DEBUG: Subplan 50_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
2
(1 row)
@ -406,7 +406,7 @@ DEBUG: generating subplan 53_2 for CTE some_values_2: SELECT key, random() AS r
DEBUG: Plan 53 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('53_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE false
DEBUG: Subplan 53_1 will be sent to localhost:xxxxx
DEBUG: Subplan 53_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -428,7 +428,7 @@ DEBUG: generating subplan 56_2 for CTE some_values_3: SELECT key, random() AS r
DEBUG: Plan 56 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('56_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_3
DEBUG: Subplan 56_1 will be sent to localhost:xxxxx
DEBUG: Subplan 56_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
2
(1 row)
@ -488,7 +488,7 @@ DEBUG: Subplan 59_4 will be sent to localhost:xxxxx
DEBUG: Subplan 59_5 will be sent to localhost:xxxxx
DEBUG: Subplan 59_5 will be sent to localhost:xxxxx
DEBUG: Subplan 59_6 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -544,7 +544,7 @@ DEBUG: Subplan 66_3 will be sent to localhost:xxxxx
DEBUG: Subplan 66_4 will be sent to localhost:xxxxx
DEBUG: Subplan 66_5 will be sent to localhost:xxxxx
DEBUG: Subplan 66_6 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -559,7 +559,7 @@ DEBUG: generating subplan 73_2 for subquery SELECT key FROM intermediate_result
DEBUG: Plan 73 query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('73_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('73_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)
DEBUG: Subplan 73_1 will be sent to localhost:xxxxx
DEBUG: Subplan 73_2 will be sent to localhost:xxxxx
key
key
---------------------------------------------------------------------
(0 rows)

@ -589,7 +589,7 @@ DEBUG: Subplan 76_1 will be sent to localhost:xxxxx
DEBUG: Subplan 77_1 will be sent to localhost:xxxxx
DEBUG: Subplan 77_2 will be sent to localhost:xxxxx
DEBUG: Subplan 76_2 will be sent to localhost:xxxxx
key
key
---------------------------------------------------------------------
(0 rows)

@ -619,7 +619,7 @@ DEBUG: Subplan 81_1 will be sent to localhost:xxxxx
DEBUG: Subplan 82_1 will be sent to localhost:xxxxx
DEBUG: Subplan 82_2 will be sent to localhost:xxxxx
DEBUG: Subplan 81_2 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -638,7 +638,7 @@ DEBUG: generating subplan 86_1 for subquery SELECT key, random() AS random FROM
DEBUG: Plan 86 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('86_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
DEBUG: Subplan 86_1 will be sent to localhost:xxxxx
DEBUG: Subplan 86_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
14
(1 row)
@ -655,7 +655,7 @@ WHERE
DEBUG: generating subplan 88_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2
DEBUG: Plan 88 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('88_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
DEBUG: Subplan 88_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
4
(1 row)
@ -682,7 +682,7 @@ DEBUG: Subplan 90_1 will be sent to localhost:xxxxx
DEBUG: Subplan 90_2 will be sent to localhost:xxxxx
DEBUG: Subplan 92_1 will be sent to localhost:xxxxx
DEBUG: Subplan 92_1 will be sent to localhost:xxxxx
key | value
key | value
---------------------------------------------------------------------
3 | 3
4 | 4
@ -712,7 +712,7 @@ DEBUG: Subplan 94_1 will be sent to localhost:xxxxx
DEBUG: Subplan 94_2 will be sent to localhost:xxxxx
DEBUG: Subplan 96_1 will be sent to localhost:xxxxx
DEBUG: Subplan 96_1 will be sent to localhost:xxxxx
key | value
key | value
---------------------------------------------------------------------
3 | 3
4 | 4
@ -737,7 +737,7 @@ DEBUG: Plan 99 query after replacing subqueries and CTEs: DELETE FROM intermedi
DEBUG: Plan 98 query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('98_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
DEBUG: Subplan 98_1 will be sent to localhost:xxxxx
DEBUG: Subplan 99_1 will be sent to localhost:xxxxx
key | value
key | value
---------------------------------------------------------------------
6 | 6
(1 row)
@ -837,37 +837,37 @@ SET client_min_messages TO DEFAULT;
CREATE TABLE range_partitioned(range_column text, data int);
SET client_min_messages TO DEBUG1;
SELECT create_distributed_table('range_partitioned', 'range_column', 'range');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

SELECT master_create_empty_shard('range_partitioned');
master_create_empty_shard
master_create_empty_shard
---------------------------------------------------------------------
1480013
(1 row)

SELECT master_create_empty_shard('range_partitioned');
master_create_empty_shard
master_create_empty_shard
---------------------------------------------------------------------
1480014
(1 row)

SELECT master_create_empty_shard('range_partitioned');
master_create_empty_shard
master_create_empty_shard
---------------------------------------------------------------------
1480015
(1 row)

SELECT master_create_empty_shard('range_partitioned');
master_create_empty_shard
master_create_empty_shard
---------------------------------------------------------------------
1480016
(1 row)

SELECT master_create_empty_shard('range_partitioned');
master_create_empty_shard
master_create_empty_shard
---------------------------------------------------------------------
1480017
(1 row)
@ -888,7 +888,7 @@ WHERE
DEBUG: generating subplan 120_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned
DEBUG: Plan 120 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('120_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))))
DEBUG: Subplan 120_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -905,7 +905,7 @@ DEBUG: generating subplan 122_1 for subquery SELECT data FROM intermediate_resu
DEBUG: Plan 122 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.>=) 'A'::text) AND (range_column OPERATOR(pg_catalog.<=) 'K'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('122_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))))
DEBUG: Subplan 122_1 will be sent to localhost:xxxxx
DEBUG: Subplan 122_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -925,7 +925,7 @@ DEBUG: generating subplan 124_1 for CTE some_data: SELECT data FROM intermediat
DEBUG: Plan 124 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('124_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data)))
DEBUG: Subplan 124_1 will be sent to localhost:xxxxx
DEBUG: Subplan 124_1 will be sent to localhost:xxxxx
count
count
---------------------------------------------------------------------
0
(1 row)
@ -9,13 +9,13 @@ CREATE OR REPLACE FUNCTION pg_catalog.store_intermediate_result_on_node(nodename
-- in the same transaction we can read a result
BEGIN;
SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s');
create_intermediate_result
create_intermediate_result
---------------------------------------------------------------------
5
(1 row)

SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int);
x | x2
x | x2
---------------------------------------------------------------------
1 | 1
2 | 4
@ -27,7 +27,7 @@ SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 in
COMMIT;
-- in separate transactions, the result is no longer available
SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s');
create_intermediate_result
create_intermediate_result
---------------------------------------------------------------------
5
(1 row)
@ -37,15 +37,15 @@ ERROR: result "squares" does not exist
BEGIN;
CREATE TABLE interesting_squares (user_id text, interested_in text);
SELECT create_distributed_table('interesting_squares', 'user_id');
create_distributed_table
create_distributed_table
---------------------------------------------------------------------


(1 row)

INSERT INTO interesting_squares VALUES ('jon', '2'), ('jon', '5'), ('jack', '3');
-- put an intermediate result on all workers
SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s');
broadcast_intermediate_result
broadcast_intermediate_result
---------------------------------------------------------------------
5
(1 row)
@ -55,7 +55,7 @@ SELECT x, x2
FROM interesting_squares JOIN (SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int)) squares ON (x::text = interested_in)
WHERE user_id = 'jon'
ORDER BY x;
x | x2
x | x2
---------------------------------------------------------------------
2 | 4
5 | 25
@ -65,7 +65,7 @@ END;
BEGIN;
-- put an intermediate result on all workers
SELECT broadcast_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s');
broadcast_intermediate_result
broadcast_intermediate_result
---------------------------------------------------------------------
5
(1 row)
@ -75,7 +75,7 @@ SELECT x, x2
FROM interesting_squares
JOIN (SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int)) squares ON (x::text = interested_in)
ORDER BY x;
x | x2
x | x2
---------------------------------------------------------------------
2 | 4
3 | 9
@ -110,7 +110,7 @@ SET client_min_messages TO DEFAULT;
-- try to read the file as text, will fail because of binary encoding
BEGIN;
SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s');
create_intermediate_result
create_intermediate_result
---------------------------------------------------------------------
5
(1 row)
@ -121,7 +121,7 @@ END;
-- try to read the file with wrong encoding
BEGIN;
SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,5) s');
create_intermediate_result
create_intermediate_result
---------------------------------------------------------------------
5
(1 row)
@ -139,7 +139,7 @@ INSERT INTO stored_squares VALUES ('jon', '(5,25)'::intermediate_results.square_
-- composite types change the format to text
BEGIN;
SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares');
create_intermediate_result
create_intermediate_result
---------------------------------------------------------------------
4
(1 row)
@ -149,13 +149,13 @@ ERROR: COPY file signature not recognized
COMMIT;
BEGIN;
SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares');
create_intermediate_result
create_intermediate_result
---------------------------------------------------------------------
4
(1 row)

SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type);
s
s
---------------------------------------------------------------------
(2,4)
(3,9)
@ -167,7 +167,7 @@ COMMIT;
BEGIN;
-- put an intermediate result in text format on all workers
SELECT broadcast_intermediate_result('stored_squares', 'SELECT square, metadata FROM stored_squares');
broadcast_intermediate_result
broadcast_intermediate_result
---------------------------------------------------------------------
4
(1 row)
@ -178,7 +178,7 @@ SELECT * FROM interesting_squares JOIN (
read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type, m jsonb)
) squares
ON ((s).x = interested_in) WHERE user_id = 'jon' ORDER BY 1,2;
user_id | interested_in | s | m
user_id | interested_in | s | m
---------------------------------------------------------------------
jon | 2 | (2,4) | {"value": 2}
jon | 5 | (5,25) | {"value": 5}
@ -190,7 +190,7 @@ SELECT * FROM interesting_squares JOIN (
read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type, m jsonb)
) squares
ON ((s).x = interested_in) ORDER BY 1,2;
user_id | interested_in | s | m
user_id | interested_in | s | m
---------------------------------------------------------------------
jack | 3 | (3,9) | {"value": 3}
jon | 2 | (2,4) | {"value": 2}
@ -201,39 +201,39 @@ END;
|
|||
BEGIN;
|
||||
-- accurate row count estimates for primitive types
|
||||
SELECT create_intermediate_result('squares', 'SELECT s, s*s FROM generate_series(1,632) s');
|
||||
create_intermediate_result
|
||||
create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
632
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('squares', 'binary') AS res (x int, x2 int);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Function Scan on read_intermediate_result res (cost=0.00..4.55 rows=632 width=8)
|
||||
(1 row)
|
||||
|
||||
-- less accurate results for variable types
|
||||
SELECT create_intermediate_result('hellos', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$);
|
||||
create_intermediate_result
|
||||
create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
63
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('hellos', 'binary') AS res (x int, y text);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Function Scan on read_intermediate_result res (cost=0.00..0.32 rows=30 width=36)
|
||||
(1 row)
|
||||
|
||||
-- not very accurate results for text encoding
|
||||
SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares');
|
||||
create_intermediate_result
|
||||
create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_result('stored_squares', 'text') AS res (s intermediate_results.square_type);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Function Scan on read_intermediate_result res (cost=0.00..0.01 rows=1 width=32)
|
||||
(1 row)
|
||||
|
@ -245,7 +245,7 @@ TO PROGRAM
|
|||
$$psql -h localhost -p 57636 -U postgres -d regression -c "BEGIN; COPY squares FROM STDIN WITH (format result); CREATE TABLE intermediate_results.squares AS SELECT * FROM read_intermediate_result('squares', 'text') AS res(x int, x2 int); END;"$$
|
||||
WITH (FORMAT text);
|
||||
SELECT * FROM squares ORDER BY x;
|
||||
x | x2
|
||||
x | x2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 4
|
||||
|
@ -271,19 +271,19 @@ BEGIN;
|
|||
SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,3) s'),
|
||||
create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(4,6) s'),
|
||||
create_intermediate_result('squares_3', 'SELECT s, s*s FROM generate_series(7,10) s');
|
||||
create_intermediate_result | create_intermediate_result | create_intermediate_result
|
||||
create_intermediate_result | create_intermediate_result | create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
3 | 3 | 4
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int);
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int);
|
||||
x | x2
|
||||
x | x2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 4
|
||||
|
@ -291,7 +291,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS
|
|||
(3 rows)
|
||||
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2', 'squares_3']::text[], 'binary') AS res (x int, x2 int);
|
||||
x | x2
|
||||
x | x2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 4
|
||||
|
@ -308,7 +308,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2', 'squares
|
|||
COMMIT;
|
||||
-- in separate transactions, the result is no longer available
|
||||
SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,5) s');
|
||||
create_intermediate_result
|
||||
create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
@ -318,7 +318,7 @@ ERROR: result "squares_1" does not exist
|
|||
-- error behaviour, and also check that results are deleted on rollback
|
||||
BEGIN;
|
||||
SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,3) s');
|
||||
create_intermediate_result
|
||||
create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
@ -335,13 +335,13 @@ ERROR: null array element not allowed in this context
|
|||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- after rollbacks we should be able to run vail read_intermediate_results still.
|
||||
SELECT count(*) FROM read_intermediate_results(ARRAY['squares_1']::text[], 'binary') AS res (x int, x2 int);
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
3
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM read_intermediate_results(ARRAY[]::text[], 'binary') AS res (x int, x2 int);
|
||||
count
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
@ -356,7 +356,7 @@ SELECT broadcast_intermediate_result('stored_squares_1',
|
|||
'SELECT s, s*s, ROW(1::text, 2) FROM generate_series(1,3) s'),
|
||||
broadcast_intermediate_result('stored_squares_2',
|
||||
'SELECT s, s*s, ROW(2::text, 3) FROM generate_series(4,6) s');
|
||||
broadcast_intermediate_result | broadcast_intermediate_result
|
||||
broadcast_intermediate_result | broadcast_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
3 | 3
|
||||
(1 row)
|
||||
|
@ -367,7 +367,7 @@ SELECT * FROM interesting_squares JOIN (
|
|||
read_intermediate_results(ARRAY['stored_squares_1', 'stored_squares_2'], 'binary') AS res (x int, x2 int, z intermediate_results.square_type)
|
||||
) squares
|
||||
ON (squares.x::text = interested_in) WHERE user_id = 'jon' ORDER BY 1,2;
|
||||
user_id | interested_in | x | x2 | z
|
||||
user_id | interested_in | x | x2 | z
|
||||
---------------------------------------------------------------------
|
||||
jon | 2 | 2 | 4 | (1,2)
|
||||
jon | 5 | 5 | 25 | (2,3)
|
||||
|
@ -379,13 +379,13 @@ BEGIN;
|
|||
-- almost accurate row count estimates for primitive types
|
||||
SELECT create_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1,632) s'),
|
||||
create_intermediate_result('squares_2', 'SELECT s, s*s FROM generate_series(633,1024) s');
|
||||
create_intermediate_result | create_intermediate_result
|
||||
create_intermediate_result | create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
632 | 392
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2'], 'binary') AS res (x int, x2 int);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Function Scan on read_intermediate_results res (cost=0.00..7.37 rows=1024 width=8)
|
||||
(1 row)
|
||||
|
@ -393,26 +393,26 @@ EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['squares_1', 's
|
|||
-- less accurate results for variable types
|
||||
SELECT create_intermediate_result('hellos_1', $$SELECT s, 'hello-'||s FROM generate_series(1,63) s$$),
|
||||
create_intermediate_result('hellos_2', $$SELECT s, 'hello-'||s FROM generate_series(64,129) s$$);
|
||||
create_intermediate_result | create_intermediate_result
|
||||
create_intermediate_result | create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
63 | 66
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['hellos_1', 'hellos_2'], 'binary') AS res (x int, y text);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Function Scan on read_intermediate_results res (cost=0.00..0.66 rows=62 width=36)
|
||||
(1 row)
|
||||
|
||||
-- not very accurate results for text encoding
|
||||
SELECT create_intermediate_result('stored_squares', 'SELECT square FROM stored_squares');
|
||||
create_intermediate_result
|
||||
create_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
4
|
||||
(1 row)
|
||||
|
||||
EXPLAIN (COSTS ON) SELECT * FROM read_intermediate_results(ARRAY['stored_squares'], 'text') AS res (s intermediate_results.square_type);
|
||||
QUERY PLAN
|
||||
QUERY PLAN
|
||||
---------------------------------------------------------------------
|
||||
Function Scan on read_intermediate_results res (cost=0.00..0.01 rows=1 width=32)
|
||||
(1 row)
|
||||
|
@ -424,19 +424,19 @@ END;
|
|||
-- straightforward, single-result case
|
||||
BEGIN;
|
||||
SELECT broadcast_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1, 5) s');
|
||||
broadcast_intermediate_result
|
||||
broadcast_intermediate_result
|
||||
---------------------------------------------------------------------
|
||||
5
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_2_port);
|
||||
fetch_intermediate_results
|
||||
fetch_intermediate_results
|
||||
---------------------------------------------------------------------
|
||||
111
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int);
|
||||
x | x2
|
||||
x | x2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 4
|
||||
|
@ -446,13 +446,13 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2
|
|||
(5 rows)
|
||||
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_1_port);
|
||||
fetch_intermediate_results
|
||||
fetch_intermediate_results
|
||||
---------------------------------------------------------------------
|
||||
111
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int);
|
||||
x | x2
|
||||
x | x2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 4
|
||||
|
@ -466,16 +466,16 @@ END;
|
|||
BEGIN;
|
||||
SELECT store_intermediate_result_on_node('localhost', :worker_1_port,
|
||||
'squares_1', 'SELECT s, s*s FROM generate_series(1, 2) s');
|
||||
store_intermediate_result_on_node
|
||||
store_intermediate_result_on_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT store_intermediate_result_on_node('localhost', :worker_1_port,
|
||||
'squares_2', 'SELECT s, s*s FROM generate_series(3, 4) s');
|
||||
store_intermediate_result_on_node
|
||||
store_intermediate_result_on_node
|
||||
---------------------------------------------------------------------
|
||||
|
||||
|
||||
(1 row)
|
||||
|
||||
SAVEPOINT s1;
|
||||
|
@ -494,13 +494,13 @@ ERROR: result "squares_1" does not exist
|
|||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- fetch from worker 1 should succeed
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
|
||||
fetch_intermediate_results
|
||||
fetch_intermediate_results
|
||||
---------------------------------------------------------------------
|
||||
114
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
|
||||
x | x2
|
||||
x | x2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 4
|
||||
|
@ -510,13 +510,13 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[],
|
|||
|
||||
-- fetching again should succeed
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
|
||||
fetch_intermediate_results
|
||||
fetch_intermediate_results
|
||||
---------------------------------------------------------------------
|
||||
114
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
|
||||
x | x2
|
||||
x | x2
|
||||
---------------------------------------------------------------------
|
||||
1 | 1
|
||||
2 | 4
|
||||
|
@ -527,7 +527,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[],
|
|||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- empty result id list should succeed
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], 'localhost', :worker_1_port);
|
||||
fetch_intermediate_results
|
||||
fetch_intermediate_results
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
|
|
@ -3,27 +3,27 @@ Parsed test spec with 2 sessions
starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
<waiting ...>
step s1-commit:
COMMIT;

step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -31,39 +31,39 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 10
57638 t 10
master_remove_node

starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s2-begin:
BEGIN;

step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -71,39 +71,39 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 10
57638 t 10
master_remove_node

starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
<waiting ...>
step s1-commit:
COMMIT;

step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -111,39 +111,39 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 6
57638 t 6
master_remove_node

starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s2-begin:
BEGIN;

step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -151,39 +151,39 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 6
57638 t 6
master_remove_node

starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
<waiting ...>
step s1-commit:
COMMIT;

step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM

@ -191,39 +191,39 @@ step s2-print-index-count:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s2-begin:
BEGIN;

step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-index-count:
SELECT
nodeport, success, result
FROM

@ -231,42 +231,42 @@ step s2-print-index-count:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
<waiting ...>
step s1-commit:
COMMIT;

step s2-create-reference-table-2: <... completed>
create_reference_table

step s2-print-content-2:
SELECT
nodeport, success, result
FROM

@ -274,42 +274,42 @@ step s2-print-content-2:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s2-load-metadata-cache s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
create_distributed_table

step s2-load-metadata-cache:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s2-begin:
BEGIN;

step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');

create_reference_table

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-content-2:
SELECT
nodeport, success, result
FROM

@ -317,36 +317,36 @@ step s2-print-content-2:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content
create_distributed_table

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
<waiting ...>
step s1-commit:
COMMIT;

step s2-copy-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -354,36 +354,36 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 5
57638 t 5
master_remove_node

starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_distributed_table

step s2-begin:
BEGIN;

step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -391,36 +391,36 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 5
57638 t 5
master_remove_node

starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content
create_distributed_table

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
<waiting ...>
step s1-commit:
COMMIT;

step s2-insert-to-reference-table: <... completed>
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -428,36 +428,36 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content
create_distributed_table

step s2-begin:
BEGIN;

step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-content:
SELECT
nodeport, success, result
FROM

@ -465,36 +465,36 @@ step s2-print-content:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count
create_distributed_table

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
<waiting ...>
step s1-commit:
COMMIT;

step s2-ddl-on-reference-table: <... completed>
step s2-print-index-count:
SELECT
nodeport, success, result
FROM

@ -502,36 +502,36 @@ step s2-print-index-count:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count
create_distributed_table

step s2-begin:
BEGIN;

step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-index-count:
SELECT
nodeport, success, result
FROM

@ -539,39 +539,39 @@ step s2-print-index-count:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s1-begin s1-add-second-worker s2-create-reference-table-2 s1-commit s2-print-content-2
create_distributed_table

step s1-begin:
BEGIN;

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');
<waiting ...>
step s1-commit:
COMMIT;

step s2-create-reference-table-2: <... completed>
create_reference_table

step s2-print-content-2:
SELECT
nodeport, success, result
FROM

@ -579,39 +579,39 @@ step s2-print-content-2:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

starting permutation: s2-begin s2-create-reference-table-2 s1-add-second-worker s2-commit s2-print-content-2
create_distributed_table

step s2-begin:
BEGIN;

step s2-create-reference-table-2:
SELECT create_reference_table('test_reference_table_2');

create_reference_table

step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;

step s1-add-second-worker: <... completed>
?column?

1
step s2-print-content-2:
SELECT
nodeport, success, result
FROM

@ -619,11 +619,11 @@ step s2-print-content-2:
ORDER BY
nodeport;

nodeport success result

57637 t 1
57638 t 1
master_remove_node

@ -1,634 +1,634 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s1-add-node-1 s2-remove-node-1 s1-commit s1-show-nodes
?column?

1
step s1-begin:
BEGIN;

step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s2-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-remove-node-1: <... completed>
master_remove_node

step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

master_remove_node

starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-commit s1-show-nodes
?column?

1
step s1-begin:
BEGIN;

step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s2-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s1-commit:
COMMIT;

step s2-add-node-2: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 t
localhost 57638 t
master_remove_node

starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-commit s1-show-nodes
?column?

1
step s1-begin:
BEGIN;

step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-add-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 t
master_remove_node

starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-abort s1-show-nodes
?column?

1
step s1-begin:
BEGIN;

step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s2-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s1-abort:
ABORT;

step s2-add-node-2: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57638 t
master_remove_node

starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-abort s1-show-nodes
?column?

1
step s1-begin:
BEGIN;

step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
<waiting ...>
step s1-abort:
ABORT;

step s2-add-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 t
master_remove_node

starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-remove-node-2 s1-commit s1-show-nodes
?column?

1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
step s1-begin:
BEGIN;

step s1-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);

master_remove_node

step s2-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
<waiting ...>
step s1-commit:
COMMIT;

step s2-remove-node-2: <... completed>
master_remove_node

step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

master_remove_node

starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);

master_remove_node

step s2-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-remove-node-1: <... completed>
error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:xxxxx" does not exist
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

master_remove_node

starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);

?column?

1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-activate-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 t
master_remove_node

starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);

?column?

1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-disable-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 f
master_remove_node

starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);

?column?

1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-activate-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 t
master_remove_node

starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);

?column?

1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-disable-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 f
master_remove_node

starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);

?column?

1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-activate-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 t
master_remove_node

starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);

?column?

1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-disable-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 f
master_remove_node

starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);

?column?

1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-activate-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 t
master_remove_node

starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes
?column?

1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);

?column?

1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-disable-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 f
master_remove_node

starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-abort s1-show-nodes
?column?

1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);

?column?

1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
<waiting ...>
step s1-abort:
ABORT;

step s2-disable-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 f
master_remove_node

starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-abort s1-show-nodes
?column?

1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s1-begin:
BEGIN;

step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);

?column?

1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
<waiting ...>
step s1-abort:
ABORT;

step s2-disable-node-1: <... completed>
?column?

1
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;

nodename nodeport isactive

localhost 57637 f
master_remove_node

@ -5,28 +5,28 @@ run_command_on_workers

(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s2-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s1-begin:
BEGIN;

step s1-alter-role-1:
ALTER ROLE alter_role_1 NOSUPERUSER;

step s2-add-node:
SELECT 1 FROM master_add_node('localhost', 57637);
<waiting ...>
step s1-commit:
COMMIT;

step s2-add-node: <... completed>
?column?

1
run_command_on_workers

(localhost,57637,t,"DROP ROLE")

@ -37,25 +37,25 @@ run_command_on_workers

(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s2-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s1-begin:
BEGIN;

step s1-add-node:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s2-alter-role-1:
ALTER ROLE alter_role_1 NOSUPERUSER;
<waiting ...>
step s1-commit:
COMMIT;

step s2-alter-role-1: <... completed>

@ -69,22 +69,22 @@ run_command_on_workers

(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s2-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s1-begin:
BEGIN;

step s1-alter-role-1:
ALTER ROLE alter_role_1 NOSUPERUSER;

step s2-alter-role-1:
ALTER ROLE alter_role_1 NOSUPERUSER;
<waiting ...>
step s1-commit:
COMMIT;

step s2-alter-role-1: <... completed>

@ -99,22 +99,22 @@ run_command_on_workers

(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s2-enable-propagation:
SET citus.enable_alter_role_propagation to ON;

step s1-begin:
BEGIN;

step s1-alter-role-1:
ALTER ROLE alter_role_1 NOSUPERUSER;

step s2-alter-role-2:
ALTER ROLE alter_role_2 NOSUPERUSER;

step s1-commit:
COMMIT;

run_command_on_workers
@@ -3,139 +3,139 @@ Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
@@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE append_copy; <waiting ...>
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
@@ -163,7 +163,7 @@ ERROR: relation "append_copy" does not exist
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
@@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); <wa
@@ -183,7 +183,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
@@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX append_copy_index; <waiting ...>
@@ -204,7 +204,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
@@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY append_copy_ind
@@ -224,7 +224,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
@@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; <w
@@ -244,7 +244,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
@@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE append_copy DROP new_column; <waiting ...>
@@ -265,7 +265,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
@@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; <w
@@ -285,24 +285,24 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-copy s2-master-apply-delete-command s1-commit s1-select-count
@@ -311,16 +311,16 @@ step s1-commit: COMMIT;
starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
@@ -329,16 +329,16 @@ step s1-commit: COMMIT;
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
@@ -348,134 +348,134 @@ step s1-commit: COMMIT;
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
@@ -483,14 +483,14 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech
starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
@@ -504,7 +504,7 @@ ERROR: relation "append_copy" does not exist
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
@@ -512,9 +512,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech
@@ -524,7 +524,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
@@ -533,9 +533,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech
@@ -545,7 +545,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
@@ -554,9 +554,9 @@ step s1-commit: COMMIT;
@@ -566,7 +566,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
@@ -575,9 +575,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech
@@ -587,7 +587,7 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
@@ -595,9 +595,9 @@ step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && ech
@@ -607,71 +607,71 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s2-copy s1-commit s1-select-count
starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
@@ -1,127 +1,127 @@
Parsed test spec with 2 sessions

starting permutation: s1-timeout s1-sleep10000 s1-reset s1-drop
step s1-timeout:
    SET statement_timeout = '100ms';

step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
step s1-reset:
    RESET ALL;

step s1-drop:
    DROP TABLE cancel_table;


starting permutation: s1-timeout s1-sleep10000 s1-reset s2-drop
step s1-timeout:
    SET statement_timeout = '100ms';

step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
step s1-reset:
    RESET ALL;

step s2-drop:
    DROP TABLE cancel_table;


starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s1-drop
step s1-timeout:
    SET statement_timeout = '100ms';

step s1-begin:
    BEGIN;

step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
step s1-rollback:
    ROLLBACK;

step s1-reset:
    RESET ALL;

step s1-drop:
    DROP TABLE cancel_table;


starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s2-drop
step s1-timeout:
    SET statement_timeout = '100ms';

step s1-begin:
    BEGIN;

step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
step s1-rollback:
    ROLLBACK;

step s1-reset:
    RESET ALL;

step s2-drop:
    DROP TABLE cancel_table;


starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s1-drop
step s1-timeout:
    SET statement_timeout = '100ms';

step s1-begin:
    BEGIN;

step s1-update1:
    UPDATE cancel_table SET data = '' WHERE test_id = 1;

step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
step s1-rollback:
    ROLLBACK;

step s1-reset:
    RESET ALL;

step s1-drop:
    DROP TABLE cancel_table;


starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s2-drop
step s1-timeout:
    SET statement_timeout = '100ms';

step s1-begin:
    BEGIN;

step s1-update1:
    UPDATE cancel_table SET data = '' WHERE test_id = 1;

step s1-sleep10000:
    SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1;

ERROR: canceling statement due to statement timeout
step s1-rollback:
    ROLLBACK;

step s1-reset:
    RESET ALL;

step s2-drop:
    DROP TABLE cancel_table;

@@ -3,223 +3,223 @@ Parsed test spec with 3 sessions
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-alter-table s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback
@@ -1,13 +1,13 @@
Parsed test spec with 1 sessions

starting permutation: s1a
step s1a:
    SELECT 1 FROM master_add_node('localhost', 57637);
    SELECT 1 FROM master_add_node('localhost', 57638);

?column?

1
?column?

1
@@ -3,17 +3,17 @@ Parsed test spec with 2 sessions
starting permutation: s1-begin s1-insert s2-update s1-commit
master_create_worker_shards


step s1-begin:
    BEGIN;

step s1-insert:
    INSERT INTO test_concurrent_dml VALUES(1);

step s2-update:
    UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;
 <waiting ...>
step s1-commit:
    COMMIT;

step s2-update: <... completed>
@@ -21,28 +21,28 @@ step s2-update: <... completed>
starting permutation: s1-insert s2-update
master_create_worker_shards


step s1-insert:
    INSERT INTO test_concurrent_dml VALUES(1);

step s2-update:
    UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;


starting permutation: s1-begin s1-multi-insert s2-update s1-commit
master_create_worker_shards


step s1-begin:
    BEGIN;

step s1-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (1), (2);

step s2-update:
    UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;
 <waiting ...>
step s1-commit:
    COMMIT;

step s2-update: <... completed>
@@ -50,39 +50,39 @@ step s2-update: <... completed>
starting permutation: s1-begin s1-multi-insert s2-multi-insert-overlap s1-commit
master_create_worker_shards


step s1-begin:
    BEGIN;

step s1-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (1), (2);

step s2-multi-insert-overlap:
    INSERT INTO test_concurrent_dml VALUES (1), (4);

step s1-commit:
    COMMIT;


starting permutation: s1-begin s2-begin s1-multi-insert s2-multi-insert s1-commit s2-commit
master_create_worker_shards


step s1-begin:
    BEGIN;

step s2-begin:
    BEGIN;

step s1-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (1), (2);

step s2-multi-insert:
    INSERT INTO test_concurrent_dml VALUES (3), (4);

step s1-commit:
    COMMIT;

step s2-commit:
    COMMIT;

@@ -1,50 +1,50 @@
Parsed test spec with 2 sessions

starting permutation: s1-load-cache s2-load-cache s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
step s1-load-cache:
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

step s2-load-cache:
    COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;

step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;

step s2-begin:
    BEGIN;

step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement


step s1-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
 <waiting ...>
step s2-commit:
    COMMIT;

step s1-repair-placement: <... completed>
error in steps s2-commit s1-repair-placement: ERROR: target placement must be in inactive state

starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
step s2-set-placement-inactive:
    UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;

step s2-begin:
    BEGIN;

step s2-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);

master_copy_shard_placement


step s1-repair-placement:
    SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
 <waiting ...>
step s2-commit:
    COMMIT;

step s1-repair-placement: <... completed>
@ -1,508 +1,508 @@
Parsed test spec with 2 sessions

starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content
starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count
starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content
starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count

@ -1,192 +1,192 @@
Parsed test spec with 3 sessions

starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s3-select-count
ERROR: relation "copy_table" does not exist
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count

@ -1,102 +1,102 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s2-begin s1-create_distributed_table s2-create_distributed_table s1-commit s2-commit
error in steps s1-commit s2-create_distributed_table: ERROR: table "table_to_distribute" is already distributed
starting permutation: s1-begin s2-begin s1-create_distributed_table s2-copy_to_local_table s1-commit s2-commit
starting permutation: s1-begin s2-begin s2-copy_to_local_table s1-create_distributed_table s2-commit s1-commit
starting permutation: s1-copy_to_local_table s1-begin s2-begin s1-create_distributed_table s2-create_distributed_table s1-commit s2-commit
error in steps s1-commit s2-create_distributed_table: ERROR: table "table_to_distribute" is already distributed

@ -3,252 +3,252 @@ Parsed test spec with 2 sessions
starting permutation: s1-begin s1-create-distributed s2-create-restore s1-commit
starting permutation: s1-begin s1-insert s2-create-restore s1-commit
starting permutation: s1-begin s1-modify-multiple s2-create-restore s1-commit
starting permutation: s1-begin s1-ddl s2-create-restore s1-commit
starting permutation: s1-begin s1-copy s2-create-restore s1-commit
starting permutation: s1-begin s1-recover s2-create-restore s1-commit
starting permutation: s1-begin s1-drop s2-create-restore s1-commit
starting permutation: s1-begin s1-add-node s2-create-restore s1-commit
starting permutation: s1-begin s1-remove-node s2-create-restore s1-commit
starting permutation: s1-begin s1-create-restore s2-create-restore s1-commit
starting permutation: s2-begin s2-create-restore s1-modify-multiple s2-commit

@ -256,20 +256,20 @@ step s1-modify-multiple: <... completed>

starting permutation: s2-begin s2-create-restore s1-ddl s2-commit
create_reference_table


step s2-begin:
    BEGIN;

step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

?column?

1
step s1-ddl:
    ALTER TABLE restore_table ADD COLUMN x int;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-ddl: <... completed>

@ -277,24 +277,24 @@ step s1-ddl: <... completed>

starting permutation: s2-begin s2-create-restore s1-multi-statement s2-commit
create_reference_table


step s2-begin:
    BEGIN;

step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

?column?

1
step s1-multi-statement:
    SET citus.multi_shard_commit_protocol TO '2pc';
    BEGIN;
    INSERT INTO restore_table VALUES (1,'hello');
    INSERT INTO restore_table VALUES (2,'hello');
    COMMIT;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-multi-statement: <... completed>

@ -302,153 +302,153 @@ step s1-multi-statement: <... completed>

starting permutation: s1-begin s1-create-reference s2-create-restore s1-commit
starting permutation: s1-begin s1-insert-ref s2-create-restore s1-commit
starting permutation: s1-begin s1-modify-multiple-ref s2-create-restore s1-commit
starting permutation: s1-begin s1-ddl-ref s2-create-restore s1-commit
starting permutation: s1-begin s1-copy-ref s2-create-restore s1-commit
starting permutation: s1-begin s1-drop-ref s2-create-restore s1-commit
starting permutation: s2-begin s2-create-restore s1-modify-multiple-ref s2-commit

@ -456,20 +456,20 @@ step s1-modify-multiple-ref: <... completed>

starting permutation: s2-begin s2-create-restore s1-ddl-ref s2-commit
create_reference_table


step s2-begin:
    BEGIN;

step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

?column?

1
step s1-ddl-ref:
    ALTER TABLE restore_ref_table ADD COLUMN x int;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-ddl-ref: <... completed>

@ -477,24 +477,24 @@ step s1-ddl-ref: <... completed>

starting permutation: s2-begin s2-create-restore s1-multi-statement-ref s2-commit
create_reference_table


step s2-begin:
    BEGIN;

step s2-create-restore:
    SELECT 1 FROM citus_create_restore_point('citus-test');

?column?

1
step s1-multi-statement-ref:
    SET citus.multi_shard_commit_protocol TO '2pc';
    BEGIN;
    INSERT INTO restore_ref_table VALUES (1,'hello');
    INSERT INTO restore_ref_table VALUES (2,'hello');
    COMMIT;
<waiting ...>
step s2-commit:
    COMMIT;

step s1-multi-statement-ref: <... completed>

@ -1,32 +1,32 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select
|
||||
node_name node_port
|
||||
node_name node_port
|
||||
|
||||
localhost 57637
|
||||
step s1-begin:
|
||||
localhost 57637
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-2:
|
||||
step s1-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-create-table-1:
|
||||
1
|
||||
step s2-create-table-1:
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
SELECT create_distributed_table('dist_table', 'x');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-create-table-1: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-show-placements:
|
||||
|
||||
step s1-show-placements:
|
||||
SELECT
|
||||
nodename, nodeport
|
||||
FROM
|
||||
|
@ -36,49 +36,49 @@ step s1-show-placements:
|
|||
ORDER BY
|
||||
nodename, nodeport;
|
||||
|
||||
nodename nodeport
|
||||
nodename nodeport
|
||||
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57638
|
||||
localhost 57638
|
||||
step s2-select:
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57638
|
||||
localhost 57638
|
||||
step s2-select:
|
||||
SELECT * FROM dist_table;
|
||||
|
||||
x y
|
||||
x y
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select
|
||||
node_name node_port
|
||||
node_name node_port
|
||||
|
||||
localhost 57637
|
||||
step s1-begin:
|
||||
localhost 57637
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-2:
|
||||
step s1-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-create-table-1:
|
||||
1
|
||||
step s2-create-table-1:
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
SELECT create_distributed_table('dist_table', 'x');
|
||||
<waiting ...>
|
||||
step s1-abort:
|
||||
step s1-abort:
|
||||
ABORT;
|
||||
|
||||
step s2-create-table-1: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-show-placements:
|
||||
|
||||
step s1-show-placements:
|
||||
SELECT
|
||||
nodename, nodeport
|
||||
FROM
|
||||
|
@ -88,29 +88,29 @@ step s1-show-placements:
|
|||
ORDER BY
|
||||
nodename, nodeport;
|
||||
|
||||
nodename nodeport
|
||||
nodename nodeport
|
||||
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
step s2-select:
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
step s2-select:
|
||||
SELECT * FROM dist_table;
|
||||
|
||||
x y
|
||||
x y
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-begin s2-create-table-1 s1-add-node-2 s2-commit s1-show-placements s2-select
|
||||
node_name node_port
|
||||
node_name node_port
|
||||
|
||||
localhost 57637
|
||||
step s2-begin:
|
||||
localhost 57637
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-create-table-1:
|
||||
step s2-create-table-1:
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
|
@ -118,18 +118,18 @@ step s2-create-table-1:
|
|||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-add-node-2:
|
||||
|
||||
step s1-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-add-node-2: <... completed>
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-show-placements:
|
||||
1
|
||||
step s1-show-placements:
|
||||
SELECT
|
||||
nodename, nodeport
|
||||
FROM
|
||||
|
@ -139,55 +139,55 @@ step s1-show-placements:
|
|||
ORDER BY
|
||||
nodename, nodeport;
|
||||
|
||||
nodename nodeport
|
||||
nodename nodeport
|
||||
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
step s2-select:
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
step s2-select:
|
||||
SELECT * FROM dist_table;
|
||||
|
||||
x y
|
||||
x y
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select
|
||||
node_name node_port
|
||||
node_name node_port
|
||||
|
||||
localhost 57637
|
||||
step s1-add-node-2:
|
||||
localhost 57637
|
||||
step s1-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-remove-node-2:
|
||||
step s1-remove-node-2:
|
||||
SELECT * FROM master_remove_node('localhost', 57638);
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
step s2-create-table-1:
|
||||
|
||||
step s2-create-table-1:
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
SELECT create_distributed_table('dist_table', 'x');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-create-table-1: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-show-placements:
|
||||
|
||||
step s1-show-placements:
|
||||
SELECT
|
||||
nodename, nodeport
|
||||
FROM
|
||||
|
@ -197,54 +197,54 @@ step s1-show-placements:
|
|||
ORDER BY
|
||||
nodename, nodeport;
|
||||
|
||||
nodename nodeport
|
||||
nodename nodeport
|
||||
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
step s2-select:
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
step s2-select:
|
||||
SELECT * FROM dist_table;
|
||||
|
||||
x y
|
||||
x y
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select
|
||||
node_name node_port
|
||||
node_name node_port
|
||||
|
||||
localhost 57637
|
||||
step s1-add-node-2:
|
||||
localhost 57637
|
||||
step s1-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-remove-node-2:
|
||||
step s1-remove-node-2:
|
||||
SELECT * FROM master_remove_node('localhost', 57638);
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
step s2-create-table-1:
|
||||
|
||||
step s2-create-table-1:
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
SELECT create_distributed_table('dist_table', 'x');
|
||||
<waiting ...>
|
||||
step s1-abort:
|
||||
step s1-abort:
|
||||
ABORT;
|
||||
|
||||
step s2-create-table-1: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-show-placements:
|
||||
|
||||
step s1-show-placements:
|
||||
SELECT
|
||||
nodename, nodeport
|
||||
FROM
|
||||
|
@ -254,36 +254,36 @@ step s1-show-placements:
|
|||
ORDER BY
|
||||
nodename, nodeport;
|
||||
|
||||
nodename nodeport
|
||||
nodename nodeport
|
||||
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57638
|
||||
localhost 57638
|
||||
step s2-select:
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57638
|
||||
localhost 57638
|
||||
step s2-select:
|
||||
SELECT * FROM dist_table;
|
||||
|
||||
x y
|
||||
x y
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-add-node-2 s2-begin s2-create-table-1 s1-remove-node-2 s2-commit s1-show-placements s2-select
|
||||
node_name node_port
|
||||
node_name node_port
|
||||
|
||||
localhost 57637
|
||||
step s1-add-node-2:
|
||||
localhost 57637
|
||||
step s1-add-node-2:
|
||||
SELECT 1 FROM master_add_node('localhost', 57638);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-begin:
|
||||
1
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-create-table-1:
|
||||
step s2-create-table-1:
|
||||
SET citus.shard_count TO 4;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
CREATE TABLE dist_table (x int, y int);
|
||||
|
@ -291,16 +291,16 @@ step s2-create-table-1:
|
|||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-remove-node-2:
|
||||
|
||||
step s1-remove-node-2:
|
||||
SELECT * FROM master_remove_node('localhost', 57638);
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-remove-node-2: <... completed>
|
||||
error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements
|
||||
step s1-show-placements:
|
||||
step s1-show-placements:
|
||||
SELECT
|
||||
nodename, nodeport
|
||||
FROM
|
||||
|
@ -310,74 +310,74 @@ step s1-show-placements:
|
|||
ORDER BY
|
||||
nodename, nodeport;
|
||||
|
||||
nodename nodeport
|
||||
nodename nodeport
|
||||
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57638
|
||||
localhost 57638
|
||||
step s2-select:
|
||||
localhost 57637
|
||||
localhost 57637
|
||||
localhost 57638
|
||||
localhost 57638
|
||||
step s2-select:
|
||||
SELECT * FROM dist_table;
|
||||
|
||||
x y
|
||||
x y
|
||||
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-2 s1-commit s2-select
node_name node_port
node_name node_port

localhost 57637
step s1-add-node-2:
localhost 57637
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?
?column?

1
step s1-begin:
1
step s1-begin:
BEGIN;

step s1-remove-node-2:
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);

master_remove_node

step s2-create-table-2:
step s2-create-table-2:
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dist_table (x int, y int);
SELECT create_distributed_table('dist_table', 'x');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;

step s2-create-table-2: <... completed>
error in steps s1-commit s2-create-table-2: ERROR: replication_factor (2) exceeds number of worker nodes (1)
step s2-select:
step s2-select:
SELECT * FROM dist_table;

ERROR: relation "dist_table" does not exist
master_remove_node

starting permutation: s1-add-node-2 s2-begin s2-create-table-2 s1-remove-node-2 s2-commit s2-select
node_name node_port
node_name node_port

localhost 57637
step s1-add-node-2:
localhost 57637
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?
?column?

1
step s2-begin:
1
step s2-begin:
BEGIN;

step s2-create-table-2:
step s2-create-table-2:
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dist_table (x int, y int);
@ -385,83 +385,83 @@ step s2-create-table-2:

create_distributed_table

step s1-remove-node-2:
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;

step s1-remove-node-2: <... completed>
error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements
step s2-select:
step s2-select:
SELECT * FROM dist_table;

x y
x y

master_remove_node

starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-append-table s1-commit s2-select
node_name node_port
node_name node_port

localhost 57637
step s1-add-node-2:
localhost 57637
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?
?column?

1
step s1-begin:
1
step s1-begin:
BEGIN;

step s1-remove-node-2:
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);

master_remove_node

step s2-create-append-table:
step s2-create-append-table:
SET citus.shard_replication_factor TO 1;
CREATE TABLE dist_table (x int, y int);
SELECT create_distributed_table('dist_table', 'x', 'append');
SELECT 1 FROM master_create_empty_shard('dist_table');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;

step s2-create-append-table: <... completed>
create_distributed_table

?column?

1
step s2-select:
?column?

1
step s2-select:
SELECT * FROM dist_table;

x y
x y

master_remove_node

starting permutation: s1-add-node-2 s2-begin s2-create-append-table s1-remove-node-2 s2-commit s2-select
node_name node_port
node_name node_port

localhost 57637
step s1-add-node-2:
localhost 57637
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);

?column?
?column?

1
step s2-begin:
1
step s2-begin:
BEGIN;

step s2-create-append-table:
step s2-create-append-table:
SET citus.shard_replication_factor TO 1;
CREATE TABLE dist_table (x int, y int);
SELECT create_distributed_table('dist_table', 'x', 'append');
@ -469,25 +469,25 @@ step s2-create-append-table:

create_distributed_table

?column?

1
step s1-remove-node-2:
?column?

1
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;

step s1-remove-node-2: <... completed>
master_remove_node

step s2-select:
step s2-select:
SELECT * FROM dist_table;

x y
x y

master_remove_node

@ -1,151 +1,151 @@
Parsed test spec with 2 sessions

starting permutation: s2-begin s2-copy s1-create_distributed_table s2-commit s2-select
step s2-begin:
step s2-begin:
BEGIN;

step s2-copy:
step s2-copy:
COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV;

step s1-create_distributed_table:
step s1-create_distributed_table:
SELECT create_distributed_table('migration_table', 'test_id');
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;

step s1-create_distributed_table: <... completed>
create_distributed_table

step s2-select:
step s2-select:
SELECT * FROM migration_table ORDER BY test_id;

test_id data
test_id data

1 hello
1 hello

starting permutation: s1-begin s1-create_distributed_table s2-copy s1-commit s2-select
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-create_distributed_table:
|
||||
step s1-create_distributed_table:
|
||||
SELECT create_distributed_table('migration_table', 'test_id');
|
||||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s2-copy:
|
||||
|
||||
step s2-copy:
|
||||
COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-copy: <... completed>
|
||||
step s2-select:
|
||||
step s2-select:
|
||||
SELECT * FROM migration_table ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 hello
|
||||
1 hello
|
||||
|
||||
starting permutation: s2-begin s2-insert s1-create_distributed_table s2-commit s2-select
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-insert:
|
||||
step s2-insert:
|
||||
INSERT INTO migration_table VALUES (1, 'hello');
|
||||
|
||||
step s1-create_distributed_table:
|
||||
step s1-create_distributed_table:
|
||||
SELECT create_distributed_table('migration_table', 'test_id');
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-create_distributed_table: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s2-select:
|
||||
|
||||
step s2-select:
|
||||
SELECT * FROM migration_table ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 hello
|
||||
1 hello
|
||||
|
||||
starting permutation: s1-begin s1-create_distributed_table s2-insert s1-commit s2-select
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-create_distributed_table:
|
||||
step s1-create_distributed_table:
|
||||
SELECT create_distributed_table('migration_table', 'test_id');
|
||||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s2-insert:
|
||||
|
||||
step s2-insert:
|
||||
INSERT INTO migration_table VALUES (1, 'hello');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-insert: <... completed>
|
||||
step s2-select:
|
||||
step s2-select:
|
||||
SELECT * FROM migration_table ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 hello
|
||||
1 hello
|
||||
|
||||
starting permutation: s1-begin-serializable s2-copy s1-create_distributed_table s1-commit s2-select
|
||||
step s1-begin-serializable:
|
||||
step s1-begin-serializable:
|
||||
BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
|
||||
SELECT 1;
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-copy:
|
||||
1
|
||||
step s2-copy:
|
||||
COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV;
|
||||
|
||||
step s1-create_distributed_table:
|
||||
step s1-create_distributed_table:
|
||||
SELECT create_distributed_table('migration_table', 'test_id');
|
||||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-commit:
|
||||
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-select:
|
||||
step s2-select:
|
||||
SELECT * FROM migration_table ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 hello
|
||||
1 hello
|
||||
|
||||
starting permutation: s1-begin-serializable s2-insert s1-create_distributed_table s1-commit s2-select
|
||||
step s1-begin-serializable:
|
||||
step s1-begin-serializable:
|
||||
BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
|
||||
SELECT 1;
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-insert:
|
||||
1
|
||||
step s2-insert:
|
||||
INSERT INTO migration_table VALUES (1, 'hello');
|
||||
|
||||
step s1-create_distributed_table:
|
||||
step s1-create_distributed_table:
|
||||
SELECT create_distributed_table('migration_table', 'test_id');
|
||||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-commit:
|
||||
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-select:
|
||||
step s2-select:
|
||||
SELECT * FROM migration_table ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 hello
|
||||
1 hello
|
||||
|
|
|
@ -3,7 +3,7 @@ Parsed test spec with 2 sessions
|
|||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-create-index s1-commit s2-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -20,12 +20,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-ddl-create-index-concurrently s1-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id);
|
||||
|
@ -40,12 +40,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-add-column s1-commit s2-commit s1-show-indexes s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -66,12 +66,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-rename-column s1-commit s2-commit s1-show-indexes s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -92,12 +92,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -118,12 +118,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0;
|
||||
|
@ -142,12 +142,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-add-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -163,12 +163,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -184,12 +184,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -210,12 +210,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column;
|
||||
|
@ -234,12 +234,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-add-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -255,12 +255,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -277,12 +277,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-table-size s1-commit s2-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -290,7 +290,7 @@ step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id);
|
|||
step s2-table-size: SELECT citus_total_relation_size('ddl_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
|
||||
|
@ -300,12 +300,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-master-modify-multiple-shards s1-commit s2-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -321,12 +321,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-create-index s2-distribute-table s1-commit s2-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE ddl_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -338,7 +338,7 @@ step s1-commit: COMMIT;
|
|||
step s2-distribute-table: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-commit: COMMIT;
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
|
||||
run_command_on_workers
|
||||
|
@ -347,12 +347,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,4)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-table-size s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -362,7 +362,7 @@ step s1-commit: COMMIT;
|
|||
step s2-table-size: <... completed>
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-commit: COMMIT;
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
@ -371,12 +371,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -392,12 +392,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-add-column s2-distribute-table s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE ddl_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -409,7 +409,7 @@ step s1-commit: COMMIT;
|
|||
step s2-distribute-table: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-commit: COMMIT;
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
@ -418,12 +418,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-table-size s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -433,7 +433,7 @@ step s1-commit: COMMIT;
|
|||
step s2-table-size: <... completed>
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-commit: COMMIT;
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
@ -442,12 +442,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -463,12 +463,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-distribute-table s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE ddl_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -480,7 +480,7 @@ step s1-commit: COMMIT;
|
|||
step s2-distribute-table: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-commit: COMMIT;
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
@ -489,19 +489,19 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-create-index s1-commit s2-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-table-size: SELECT citus_total_relation_size('ddl_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id);
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
||||
|
@ -512,12 +512,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-create-index s1-commit s2-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -533,12 +533,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-create-index s1-commit s2-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE ddl_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -547,7 +547,7 @@ step s2-begin: BEGIN;
|
|||
step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id');
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-create-index: <... completed>
|
||||
|
@ -559,18 +559,18 @@ run_command_on_workers
|
|||
(localhost,57638,t,4)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-table-size s2-ddl-create-index-concurrently s1-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-table-size: SELECT citus_total_relation_size('ddl_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id);
|
||||
step s1-commit: COMMIT;
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
|
||||
|
@ -580,12 +580,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-ddl-create-index-concurrently s1-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-modify-multiple-shards: DELETE FROM ddl_hash;
|
||||
|
@ -599,12 +599,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-ddl-create-index-concurrently s1-commit s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE ddl_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -612,7 +612,7 @@ step s1-begin: BEGIN;
|
|||
step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id');
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-create-index-concurrently: <... completed>
|
||||
|
@ -623,19 +623,19 @@ run_command_on_workers
|
|||
(localhost,57638,t,4)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-add-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-table-size: SELECT citus_total_relation_size('ddl_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0;
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
||||
|
@ -646,12 +646,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-add-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -667,12 +667,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-add-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE ddl_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -681,7 +681,7 @@ step s2-begin: BEGIN;
|
|||
step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id');
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-add-column: <... completed>
|
||||
|
@ -693,19 +693,19 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-rename-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-table-size: SELECT citus_total_relation_size('ddl_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column;
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
||||
|
@ -716,12 +716,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-rename-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -737,12 +737,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-rename-column s1-commit s2-commit s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE ddl_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -751,7 +751,7 @@ step s2-begin: BEGIN;
|
|||
step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id');
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-rename-column: <... completed>
|
||||
|
@ -763,4 +763,4 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ Parsed test spec with 2 sessions
|
|||
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-delete s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -13,17 +13,17 @@ step s1-commit: COMMIT;
|
|||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-truncate s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -33,17 +33,17 @@ step s1-commit: COMMIT;
|
|||
step s2-truncate: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
0
|
||||
0
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-drop s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -56,12 +56,12 @@ step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
|||
ERROR: relation "delete_hash" does not exist
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -71,9 +71,9 @@ step s1-commit: COMMIT;
|
|||
step s2-ddl-create-index: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -81,12 +81,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-delete s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id);
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -97,9 +97,9 @@ step s1-commit: COMMIT;
|
|||
step s2-ddl-drop-index: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -107,12 +107,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,0)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-delete s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-delete: DELETE FROM delete_hash WHERE id = 4;
|
||||
|
@ -120,9 +120,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY delete_hash_ind
|
|||
step s1-commit: COMMIT;
|
||||
step s2-ddl-create-index-concurrently: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -130,12 +130,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -145,9 +145,9 @@ step s1-commit: COMMIT;
|
|||
step s2-ddl-add-column: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -155,12 +155,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-delete s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0;
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -171,9 +171,9 @@ step s1-commit: COMMIT;
|
|||
step s2-ddl-drop-column: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -181,12 +181,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -196,9 +196,9 @@ step s1-commit: COMMIT;
|
|||
step s2-ddl-rename-column: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -206,12 +206,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-table-size s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -219,21 +219,21 @@ step s1-delete: DELETE FROM delete_hash WHERE id = 4;
|
|||
step s2-table-size: SELECT citus_total_relation_size('delete_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-delete s2-distribute-table s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE delete_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -245,20 +245,20 @@ step s1-commit: COMMIT;
|
|||
step s2-distribute-table: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
8
|
||||
8
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-delete s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -268,17 +268,17 @@ step s1-commit: COMMIT;
|
|||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
0
|
||||
0
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-delete s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -292,12 +292,12 @@ step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
|||
ERROR: relation "delete_hash" does not exist
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -307,9 +307,9 @@ step s1-commit: COMMIT;
|
|||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -317,12 +317,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,2)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id);
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -333,9 +333,9 @@ step s1-commit: COMMIT;
|
|||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -343,12 +343,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,0)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -358,9 +358,9 @@ step s1-commit: COMMIT;
|
|||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -368,12 +368,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0;
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -384,9 +384,9 @@ step s1-commit: COMMIT;
|
|||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -394,12 +394,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@ -409,9 +409,9 @@ step s1-commit: COMMIT;
|
|||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -419,34 +419,34 @@ run_command_on_workers
|
|||
(localhost,57638,t,new_column)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-delete s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-table-size: SELECT citus_total_relation_size('delete_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-delete: DELETE FROM delete_hash WHERE id = 4;
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
4
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-delete s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE delete_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@ -455,15 +455,15 @@ step s2-begin: BEGIN;
|
|||
step s1-distribute-table: SELECT create_distributed_table('delete_hash', 'id');
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-delete: DELETE FROM delete_hash WHERE id = 4; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-delete: <... completed>
|
||||
step s2-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
|
||||
count
|
||||
count
|
||||
|
||||
8
|
||||
8
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -1,544 +1,544 @@
|
|||
Parsed test spec with 3 sessions
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-delete:
|
||||
|
||||
step s1-delete:
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-insert:
|
||||
|
||||
step s2-insert:
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES (1, 1)');
|
||||
<waiting ...>
|
||||
step s1-rollback-worker:
|
||||
step s1-rollback-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-insert: <... completed>
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-display:
|
||||
|
||||
step s3-display:
|
||||
SELECT * FROM ref_table ORDER BY id, value;
|
||||
SELECT * FROM dist_table ORDER BY id, value;
|
||||
|
||||
id value
|
||||
id value
|
||||
|
||||
1 10
|
||||
2 20
|
||||
id value
|
||||
1 10
|
||||
2 20
|
||||
id value
|
||||
|
||||
1 1
|
||||
1 1
|
||||
2 2
|
||||
1 1
|
||||
1 1
|
||||
2 2
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-delete:
|
||||
|
||||
step s1-delete:
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-select:
|
||||
|
||||
step s2-select:
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-commit-worker:
|
||||
|
||||
step s1-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-display:
|
||||
|
||||
step s3-display:
|
||||
SELECT * FROM ref_table ORDER BY id, value;
|
||||
SELECT * FROM dist_table ORDER BY id, value;
|
||||
|
||||
id value
|
||||
id value
|
||||
|
||||
2 20
|
||||
id value
|
||||
2 20
|
||||
id value
|
||||
|
||||
2 2
|
||||
2 2
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-delete:
|
||||
|
||||
step s1-delete:
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-insert-select:
|
||||
|
||||
step s2-insert-select:
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table SELECT * FROM dist_table');
|
||||
<waiting ...>
|
||||
step s1-rollback-worker:
|
||||
step s1-rollback-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-insert-select: <... completed>
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-display:
|
||||
|
||||
step s3-display:
|
||||
SELECT * FROM ref_table ORDER BY id, value;
|
||||
SELECT * FROM dist_table ORDER BY id, value;
|
||||
|
||||
id value
|
||||
id value
|
||||
|
||||
1 10
|
||||
2 20
|
||||
id value
|
||||
1 10
|
||||
2 20
|
||||
id value
|
||||
|
||||
1 1
|
||||
1 1
|
||||
2 2
|
||||
2 2
|
||||
1 1
|
||||
1 1
|
||||
2 2
|
||||
2 2
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-update:
|
||||
|
||||
step s1-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-update:
|
||||
|
||||
step s2-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE dist_table SET value=2 WHERE id=1');
|
||||
<waiting ...>
|
||||
step s1-commit-worker:
|
||||
step s1-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-update: <... completed>
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-display:
|
||||
|
||||
step s3-display:
|
||||
SELECT * FROM ref_table ORDER BY id, value;
|
||||
SELECT * FROM dist_table ORDER BY id, value;
|
||||
|
||||
id value
|
||||
id value
|
||||
|
||||
2 20
|
||||
3 10
|
||||
id value
|
||||
2 20
|
||||
3 10
|
||||
id value
|
||||
|
||||
1 2
|
||||
2 2
|
||||
1 2
|
||||
2 2
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-update:
|
||||
|
||||
step s1-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-copy:
|
||||
|
||||
step s2-copy:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COPY dist_table FROM PROGRAM ''echo 1, 1'' WITH CSV');
|
||||
<waiting ...>
|
||||
step s1-rollback-worker:
|
||||
step s1-rollback-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-copy: <... completed>
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-display:
|
||||
|
||||
step s3-display:
|
||||
SELECT * FROM ref_table ORDER BY id, value;
|
||||
SELECT * FROM dist_table ORDER BY id, value;
|
||||
|
||||
id value
|
||||
id value
|
||||
|
||||
1 10
|
||||
2 20
|
||||
id value
|
||||
1 10
|
||||
2 20
|
||||
id value
|
||||
|
||||
1 1
|
||||
1 1
|
||||
2 2
|
||||
1 1
|
||||
1 1
|
||||
2 2
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-update:
|
||||
|
||||
step s1-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET id=id+2 WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-truncate:
|
||||
|
||||
step s2-truncate:
|
||||
SELECT run_commands_on_session_level_connection_to_node('TRUNCATE dist_table');
|
||||
<waiting ...>
|
||||
step s1-commit-worker:
|
||||
step s1-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-truncate: <... completed>
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-display:
|
||||
|
||||
step s3-display:
|
||||
SELECT * FROM ref_table ORDER BY id, value;
|
||||
SELECT * FROM dist_table ORDER BY id, value;
|
||||
|
||||
id value
|
||||
id value
|
||||
|
||||
2 20
|
||||
3 10
|
||||
id value
|
||||
2 20
|
||||
3 10
|
||||
id value
|
||||
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select-for-udpate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-delete:
|
||||
|
||||
step s1-delete:
|
||||
SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE id=1');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-select-for-udpate:
|
||||
|
||||
step s2-select-for-udpate:
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id=1 FOR UPDATE');
|
||||
<waiting ...>
|
||||
step s1-commit-worker:
|
||||
step s1-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-select-for-udpate: <... completed>
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-display:
|
||||
|
||||
step s3-display:
|
||||
SELECT * FROM ref_table ORDER BY id, value;
|
||||
SELECT * FROM dist_table ORDER BY id, value;
|
||||
|
||||
id value
|
||||
id value
|
||||
|
||||
2 20
|
||||
id value
|
||||
2 20
|
||||
id value
|
||||
|
||||
2 2
|
||||
2 2
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@@ -1,63 +1,63 @@
|
|||
Parsed test spec with 3 sessions
|
||||
|
||||
starting permutation: s1-begin s1-assign-transaction-id s1-get-all-transactions s2-begin s2-assign-transaction-id s2-get-all-transactions s3-begin s3-assign-transaction-id s3-get-all-transactions s1-commit s2-commit s3-commit
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-assign-transaction-id:
|
||||
step s1-assign-transaction-id:
|
||||
SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
|
||||
|
||||
step s1-get-all-transactions:
|
||||
|
||||
step s1-get-all-transactions:
|
||||
SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3;
|
||||
|
||||
initiator_node_identifier transaction_number transaction_stamp
|
||||
|
||||
1 1 Wed Dec 31 16:00:00 2014 PST
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-assign-transaction-id:
|
||||
step s2-assign-transaction-id:
|
||||
SELECT assign_distributed_transaction_id(2, 2, '2015-01-02 00:00:00+0');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
|
||||
|
||||
step s2-get-all-transactions:
|
||||
|
||||
step s2-get-all-transactions:
|
||||
SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3;
|
||||
|
||||
initiator_node_identifier transaction_number transaction_stamp
|
||||
|
||||
2 2 Thu Jan 01 16:00:00 2015 PST
|
||||
step s3-begin:
|
||||
step s3-begin:
|
||||
BEGIN;
|
||||
|
||||
step s3-assign-transaction-id:
|
||||
step s3-assign-transaction-id:
|
||||
SELECT assign_distributed_transaction_id(3, 3, '2015-01-03 00:00:00+0');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
|
||||
|
||||
step s3-get-all-transactions:
|
||||
|
||||
step s3-get-all-transactions:
|
||||
SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3;
|
||||
|
||||
initiator_node_identifier transaction_number transaction_stamp
|
||||
|
||||
3 3 Fri Jan 02 16:00:00 2015 PST
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s3-commit:
|
||||
step s3-commit:
|
||||
COMMIT;
|
||||
|
||||
|
||||
starting permutation: s1-create-table s1-begin s1-insert s1-verify-current-xact-is-on-worker s1-commit
|
||||
step s1-create-table:
|
||||
step s1-create-table:
|
||||
-- some tests also use distributed table
|
||||
CREATE TABLE distributed_transaction_id_table(some_value int, other_value int);
|
||||
SET citus.shard_count TO 4;
|
||||
|
@@ -65,14 +65,14 @@ step s1-create-table:
|
|||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-begin:
|
||||
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-insert:
|
||||
step s1-insert:
|
||||
INSERT INTO distributed_transaction_id_table VALUES (1, 1);
|
||||
|
||||
step s1-verify-current-xact-is-on-worker:
|
||||
step s1-verify-current-xact-is-on-worker:
|
||||
SELECT
|
||||
remote.nodeport,
|
||||
remote.result = row(xact.initiator_node_identifier, xact.transaction_number)::text AS xact_exists
|
||||
|
@@ -84,39 +84,39 @@ step s1-verify-current-xact-is-on-worker:
|
|||
$$) as remote
|
||||
ORDER BY remote.nodeport ASC;
|
||||
|
||||
nodeport xact_exists
|
||||
nodeport xact_exists
|
||||
|
||||
57637 t
|
||||
57638 t
|
||||
step s1-commit:
|
||||
57637 t
|
||||
57638 t
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-assign-transaction-id s1-has-transaction-number s2-vacuum s1-has-transaction-number s1-commit
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-assign-transaction-id:
|
||||
step s1-assign-transaction-id:
|
||||
SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0');
|
||||
|
||||
assign_distributed_transaction_id
|
||||
|
||||
|
||||
step s1-has-transaction-number:
|
||||
|
||||
step s1-has-transaction-number:
|
||||
SELECT transaction_number > 0 FROM get_current_transaction_id();
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
t
|
||||
step s2-vacuum:
|
||||
t
|
||||
step s2-vacuum:
|
||||
VACUUM FULL pg_dist_partition;
|
||||
|
||||
step s1-has-transaction-number:
|
||||
step s1-has-transaction-number:
|
||||
SELECT transaction_number > 0 FROM get_current_transaction_id();
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
t
|
||||
step s1-commit:
|
||||
t
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
|
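Outside the isolation tester, the two UDFs exercised above can be called directly. A short sketch, assuming any database with the Citus extension loaded; the id values and the timestamp are arbitrary, as they are in the steps:

    BEGIN;
    -- tag this backend with a distributed transaction id:
    -- (initiator node identifier, transaction number, timestamp)
    SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0');

    -- the id stays visible for the rest of the transaction
    SELECT initiator_node_identifier, transaction_number, transaction_stamp
    FROM get_current_transaction_id();

    -- transaction_number = 0 would mean no distributed transaction id is
    -- assigned, which is what the VACUUM permutation checks does not happen
    SELECT transaction_number > 0 FROM get_current_transaction_id();
    COMMIT;
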
|
|
@@ -3,202 +3,202 @@ Parsed test spec with 2 sessions
|
|||
starting permutation: s2-invalidate-57637 s1-begin s1-insertone s2-repair s1-commit
|
||||
master_create_worker_shards
|
||||
|
||||
|
||||
step s2-invalidate-57637:
|
||||
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-insertone:
|
||||
step s1-insertone:
|
||||
INSERT INTO test_dml_vs_repair VALUES(1, 1);
|
||||
|
||||
step s2-repair:
|
||||
step s2-repair:
|
||||
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-repair: <... completed>
|
||||
master_copy_shard_placement
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-insertone s2-invalidate-57637 s1-begin s1-insertall s2-repair s1-commit
|
||||
master_create_worker_shards
|
||||
|
||||
|
||||
step s1-insertone:
|
||||
|
||||
step s1-insertone:
|
||||
INSERT INTO test_dml_vs_repair VALUES(1, 1);
|
||||
|
||||
step s2-invalidate-57637:
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-insertall:
|
||||
step s1-insertall:
|
||||
INSERT INTO test_dml_vs_repair SELECT test_id, data+1 FROM test_dml_vs_repair;
|
||||
|
||||
step s2-repair:
|
||||
step s2-repair:
|
||||
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-repair: <... completed>
|
||||
master_copy_shard_placement
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-invalidate-57637 s2-begin s2-repair s1-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display
|
||||
master_create_worker_shards
|
||||
|
||||
|
||||
step s2-invalidate-57637:
|
||||
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-repair:
|
||||
step s2-repair:
|
||||
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
|
||||
|
||||
master_copy_shard_placement
|
||||
|
||||
|
||||
step s1-insertone:
|
||||
|
||||
step s1-insertone:
|
||||
INSERT INTO test_dml_vs_repair VALUES(1, 1);
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-insertone: <... completed>
|
||||
step s2-invalidate-57638:
|
||||
step s2-invalidate-57638:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
|
||||
|
||||
step s1-display:
|
||||
step s1-display:
|
||||
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 1
|
||||
step s2-invalidate-57637:
|
||||
1 1
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s2-revalidate-57638:
|
||||
step s2-revalidate-57638:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
|
||||
|
||||
step s1-display:
|
||||
step s1-display:
|
||||
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 1
|
||||
1 1
|
||||
|
||||
starting permutation: s2-invalidate-57637 s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display
|
||||
master_create_worker_shards
|
||||
|
||||
|
||||
step s2-invalidate-57637:
|
||||
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s1-prepared-insertone:
|
||||
step s1-prepared-insertone:
|
||||
EXECUTE insertone;
|
||||
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-repair:
|
||||
step s2-repair:
|
||||
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
|
||||
|
||||
master_copy_shard_placement
|
||||
|
||||
|
||||
step s1-prepared-insertone:
|
||||
|
||||
step s1-prepared-insertone:
|
||||
EXECUTE insertone;
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-prepared-insertone: <... completed>
|
||||
step s2-invalidate-57638:
|
||||
step s2-invalidate-57638:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
|
||||
|
||||
step s1-display:
|
||||
step s1-display:
|
||||
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 1
|
||||
1 1
|
||||
step s2-invalidate-57637:
|
||||
1 1
|
||||
1 1
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s2-revalidate-57638:
|
||||
step s2-revalidate-57638:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
|
||||
|
||||
step s1-display:
|
||||
step s1-display:
|
||||
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 1
|
||||
1 1
|
||||
1 1
|
||||
1 1
|
||||
|
||||
starting permutation: s2-invalidate-57637 s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display
|
||||
master_create_worker_shards
|
||||
|
||||
|
||||
step s2-invalidate-57637:
|
||||
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s1-insertone:
|
||||
step s1-insertone:
|
||||
INSERT INTO test_dml_vs_repair VALUES(1, 1);
|
||||
|
||||
step s1-prepared-insertall:
|
||||
step s1-prepared-insertall:
|
||||
EXECUTE insertall;
|
||||
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-repair:
|
||||
step s2-repair:
|
||||
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
|
||||
|
||||
master_copy_shard_placement
|
||||
|
||||
|
||||
step s1-prepared-insertall:
|
||||
|
||||
step s1-prepared-insertall:
|
||||
EXECUTE insertall;
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-prepared-insertall: <... completed>
|
||||
step s2-invalidate-57638:
|
||||
step s2-invalidate-57638:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
|
||||
|
||||
step s1-display:
|
||||
step s1-display:
|
||||
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 1
|
||||
1 2
|
||||
1 2
|
||||
1 3
|
||||
step s2-invalidate-57637:
|
||||
1 1
|
||||
1 2
|
||||
1 2
|
||||
1 3
|
||||
step s2-invalidate-57637:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
|
||||
|
||||
step s2-revalidate-57638:
|
||||
step s2-revalidate-57638:
|
||||
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
|
||||
|
||||
step s1-display:
|
||||
step s1-display:
|
||||
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
|
||||
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
1 1
|
||||
1 2
|
||||
1 2
|
||||
1 3
|
||||
1 1
|
||||
1 2
|
||||
1 2
|
||||
1 3
|
||||
|
|
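All of the repair permutations above reduce to the same two metadata operations. A condensed sketch of that sequence, using the table name and worker ports from this spec:

    -- mark the placement on worker 57637 as invalid (shardstate 3)
    UPDATE pg_dist_shard_placement
    SET shardstate = '3'
    WHERE shardid = (SELECT shardid FROM pg_dist_shard
                     WHERE logicalrelid = 'test_dml_vs_repair'::regclass)
      AND nodeport = 57637;

    -- copy the healthy placement on 57638 back onto 57637; the permutations
    -- show this call and concurrent DML on the shard block one another
    -- until the earlier transaction commits
    SELECT master_copy_shard_placement(
        (SELECT shardid FROM pg_dist_shard
         WHERE logicalrelid = 'test_dml_vs_repair'::regclass),
        'localhost', 57638, 'localhost', 57637);
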
|
@@ -1,218 +1,218 @@
|
|||
Parsed test spec with 3 sessions
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-insert:
|
||||
|
||||
step s1-insert:
|
||||
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO dist_table VALUES(5, 55)');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-alter:
|
||||
|
||||
step s2-alter:
|
||||
ALTER TABLE dist_table DROP value;
|
||||
<waiting ...>
|
||||
step s1-commit-worker:
|
||||
step s1-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-alter: <... completed>
|
||||
step s2-commit-worker:
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s3-select-count:
|
||||
|
||||
step s3-select-count:
|
||||
SELECT COUNT(*) FROM dist_table;
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
6
|
||||
6
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-index s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit s2-commit-worker s2-stop-connection
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-index:
|
||||
step s1-index:
|
||||
CREATE INDEX dist_table_index ON dist_table (id);
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-select-for-update:
|
||||
|
||||
step s2-select-for-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-commit:
|
||||
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-commit-worker:
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-select-for-update:
|
||||
|
||||
step s1-select-for-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
|
||||
step s2-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57638);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-begin-on-worker:
|
||||
|
||||
step s2-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-select-for-update:
|
||||
|
||||
step s2-select-for-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
|
||||
<waiting ...>
|
||||
step s1-commit-worker:
|
||||
step s1-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
step s2-select-for-update: <... completed>
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-commit-worker:
|
||||
|
||||
step s2-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-stop-connection:
|
||||
|
||||
step s2-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection
|
||||
step s1-start-session-level-connection:
|
||||
step s1-start-session-level-connection:
|
||||
SELECT start_session_level_connection_to_node('localhost', 57637);
|
||||
|
||||
start_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-begin-on-worker:
|
||||
|
||||
step s1-begin-on-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('BEGIN');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-select-for-update:
|
||||
|
||||
step s1-select-for-update:
|
||||
SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM dist_table WHERE id = 5 FOR UPDATE');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s2-coordinator-create-index-concurrently:
|
||||
|
||||
step s2-coordinator-create-index-concurrently:
|
||||
CREATE INDEX CONCURRENTLY dist_table_index_conc ON dist_table(id);
|
||||
|
||||
step s1-commit-worker:
|
||||
step s1-commit-worker:
|
||||
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
|
||||
|
||||
run_commands_on_session_level_connection_to_node
|
||||
|
||||
|
||||
step s1-stop-connection:
|
||||
|
||||
step s1-stop-connection:
|
||||
SELECT stop_session_level_connection_to_node();
|
||||
|
||||
stop_session_level_connection_to_node
|
||||
|
||||
|
||||
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
|
|
@@ -1,242 +1,242 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-begin s1-drop-all-shards s2-truncate s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-truncate:
|
||||
16
|
||||
step s2-truncate:
|
||||
TRUNCATE append_table;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-truncate: <... completed>
|
||||
|
||||
starting permutation: s1-begin s1-drop-all-shards s2-apply-delete-command s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-apply-delete-command:
|
||||
16
|
||||
step s2-apply-delete-command:
|
||||
SELECT master_apply_delete_command('DELETE FROM append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-apply-delete-command: <... completed>
|
||||
master_apply_delete_command
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-begin s1-drop-all-shards s2-drop-all-shards s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-drop-all-shards:
|
||||
16
|
||||
step s2-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-begin s1-drop-all-shards s2-select s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-drop-all-shards:
|
||||
step s1-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
|
||||
master_drop_all_shards
|
||||
|
||||
16
|
||||
step s2-select:
|
||||
16
|
||||
step s2-select:
|
||||
SELECT * FROM append_table;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-select: <... completed>
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-apply-delete-command s2-truncate s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-apply-delete-command:
|
||||
step s1-apply-delete-command:
|
||||
SELECT master_apply_delete_command('DELETE FROM append_table');
|
||||
|
||||
master_apply_delete_command
|
||||
|
||||
16
|
||||
step s2-truncate:
|
||||
16
|
||||
step s2-truncate:
|
||||
TRUNCATE append_table;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-truncate: <... completed>
|
||||
|
||||
starting permutation: s1-begin s1-apply-delete-command s2-apply-delete-command s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-apply-delete-command:
|
||||
step s1-apply-delete-command:
|
||||
SELECT master_apply_delete_command('DELETE FROM append_table');
|
||||
|
||||
master_apply_delete_command
|
||||
|
||||
16
|
||||
step s2-apply-delete-command:
|
||||
16
|
||||
step s2-apply-delete-command:
|
||||
SELECT master_apply_delete_command('DELETE FROM append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-apply-delete-command: <... completed>
|
||||
master_apply_delete_command
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-begin s1-apply-delete-command s2-drop-all-shards s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-apply-delete-command:
|
||||
step s1-apply-delete-command:
|
||||
SELECT master_apply_delete_command('DELETE FROM append_table');
|
||||
|
||||
master_apply_delete_command
|
||||
|
||||
16
|
||||
step s2-drop-all-shards:
|
||||
16
|
||||
step s2-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-begin s1-truncate s2-truncate s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-truncate:
|
||||
step s1-truncate:
|
||||
TRUNCATE append_table;
|
||||
|
||||
step s2-truncate:
|
||||
step s2-truncate:
|
||||
TRUNCATE append_table;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-truncate: <... completed>
|
||||
|
||||
starting permutation: s1-begin s1-truncate s2-apply-delete-command s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-truncate:
|
||||
step s1-truncate:
|
||||
TRUNCATE append_table;
|
||||
|
||||
step s2-apply-delete-command:
|
||||
step s2-apply-delete-command:
|
||||
SELECT master_apply_delete_command('DELETE FROM append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-apply-delete-command: <... completed>
|
||||
master_apply_delete_command
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-begin s1-truncate s2-drop-all-shards s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-truncate:
|
||||
step s1-truncate:
|
||||
TRUNCATE append_table;
|
||||
|
||||
step s2-drop-all-shards:
|
||||
step s2-drop-all-shards:
|
||||
SELECT master_drop_all_shards('append_table', 'public', 'append_table');
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-begin s1-truncate s2-select s1-commit
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-truncate:
|
||||
step s1-truncate:
|
||||
TRUNCATE append_table;
|
||||
|
||||
step s2-select:
|
||||
step s2-select:
|
||||
SELECT * FROM append_table;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-select: <... completed>
|
||||
test_id data
|
||||
test_id data
|
||||
|
||||
|
|
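Each permutation in this spec pairs two of the same three operations on append_table. A sketch of the individual calls as they appear in the steps; the return value of the first two is the number of affected shards, as the 16 and 0 results above show:

    -- drop every shard of the append-distributed table
    SELECT master_drop_all_shards('append_table', 'public', 'append_table');

    -- delete shards through the append-table delete path
    SELECT master_apply_delete_command('DELETE FROM append_table');

    -- plain TRUNCATE; the permutations show it conflicts with both calls above
    TRUNCATE append_table;
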
|
@@ -3,7 +3,7 @@ Parsed test spec with 2 sessions
|
|||
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-drop s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -17,12 +17,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
|
|||
ERROR: relation "drop_hash" does not exist
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -41,12 +41,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,0)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id);
|
||||
step s1-begin: BEGIN;
|
||||
|
@@ -66,12 +66,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,0)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-drop: DROP TABLE drop_hash;
|
||||
|
@@ -88,12 +88,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,0)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -112,12 +112,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0;
|
||||
step s1-begin: BEGIN;
|
||||
|
@@ -137,12 +137,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -161,12 +161,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -180,12 +180,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
|
|||
ERROR: relation "drop_hash" does not exist
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE drop_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@@ -201,12 +201,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
|
|||
ERROR: relation "drop_hash" does not exist
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -224,12 +224,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,0)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id);
|
||||
step s1-begin: BEGIN;
|
||||
|
@@ -248,12 +248,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,0)
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -271,12 +271,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0;
|
||||
step s1-begin: BEGIN;
|
||||
|
@@ -295,12 +295,12 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
|
@@ -318,19 +318,19 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s2-begin: BEGIN;
|
||||
step s1-table-size: SELECT citus_total_relation_size('drop_hash');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-drop: DROP TABLE drop_hash;
|
||||
step s1-commit: COMMIT;
|
||||
step s2-commit: COMMIT;
|
||||
|
@@ -338,12 +338,12 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
|
|||
ERROR: relation "drop_hash" does not exist
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE drop_hash;
|
||||
step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
|
||||
|
@@ -352,7 +352,7 @@ step s2-begin: BEGIN;
|
|||
step s1-distribute-table: SELECT create_distributed_table('drop_hash', 'id');
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-drop: DROP TABLE drop_hash; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-drop: <... completed>
|
||||
|
@@ -361,4 +361,4 @@ step s1-select-count: SELECT COUNT(*) FROM drop_hash;
|
|||
ERROR: relation "drop_hash" does not exist
|
||||
restore_isolation_tester_func
|
||||
|
||||
|
||||
|
||||
|
|
|
@@ -1,19 +1,19 @@
|
|||
Parsed test spec with 4 sessions
|
||||
|
||||
starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-update:
|
||||
step s1-update:
|
||||
UPDATE distributed_table SET y = 1 WHERE x = 1;
|
||||
|
||||
step s2-update:
|
||||
step s2-update:
|
||||
UPDATE distributed_table SET y = 2 WHERE x = 1;
|
||||
<waiting ...>
|
||||
step detector-dump-wait-edges:
|
||||
step detector-dump-wait-edges:
|
||||
SELECT
|
||||
waiting_transaction_num,
|
||||
blocking_transaction_num,
|
||||
|
@@ -28,39 +28,39 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting

357 356 f
transactionnumberwaitingtransactionnumbers

356
357 356
step s1-abort:
ABORT;

step s2-update: <... completed>
step s2-abort:
ABORT;

starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
step s1-begin:
BEGIN;

step s2-begin:
BEGIN;

step s3-begin:
BEGIN;

step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1;

step s2-update:
UPDATE distributed_table SET y = 2 WHERE x = 1;
 <waiting ...>
step s3-update:
UPDATE distributed_table SET y = 3 WHERE x = 1;
 <waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_transaction_num,
blocking_transaction_num,
@@ -75,22 +75,22 @@ step detector-dump-wait-edges:
waiting_transaction_numblocking_transaction_numblocking_transaction_waiting

361 360 f
362 360 f
362 361 t
transactionnumberwaitingtransactionnumbers

360
361 360
362 360,361
step s1-abort:
ABORT;

step s2-update: <... completed>
step s2-abort:
ABORT;

step s3-update: <... completed>
step s3-abort:
ABORT;

@@ -1,27 +1,27 @@
Parsed test spec with 4 sessions

starting permutation: dist11-begin dist13-begin dist11-update dist13-update detector-dump-wait-edges dist11-abort dist13-abort
step dist11-begin:
BEGIN;
SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0');

assign_distributed_transaction_id

step dist13-begin:
BEGIN;
SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0');

assign_distributed_transaction_id

step dist11-update:
UPDATE local_table SET y = 1 WHERE x = 1;

step dist13-update:
UPDATE local_table SET y = 3 WHERE x = 1;
 <waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_node_id,
waiting_transaction_num,
@@ -37,33 +37,33 @@ step detector-dump-wait-edges:
waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting

13 1 11 1 f
step dist11-abort:
ABORT;

step dist13-update: <... completed>
step dist13-abort:
ABORT;


starting permutation: local-begin dist13-begin local-update dist13-update detector-dump-wait-edges local-abort dist13-abort
step local-begin:
BEGIN;

step dist13-begin:
BEGIN;
SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0');

assign_distributed_transaction_id

step local-update:
UPDATE local_table SET y = 2 WHERE x = 1;

step dist13-update:
UPDATE local_table SET y = 3 WHERE x = 1;
 <waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_node_id,
waiting_transaction_num,
@@ -79,43 +79,43 @@ step detector-dump-wait-edges:
waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting

13 1 0 f
step local-abort:
ABORT;

step dist13-update: <... completed>
step dist13-abort:
ABORT;


starting permutation: dist11-begin local-begin dist13-begin dist11-update local-update dist13-update detector-dump-wait-edges dist11-abort local-abort dist13-abort
step dist11-begin:
BEGIN;
SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0');

assign_distributed_transaction_id

step local-begin:
BEGIN;

step dist13-begin:
BEGIN;
SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0');

assign_distributed_transaction_id

step dist11-update:
UPDATE local_table SET y = 1 WHERE x = 1;

step local-update:
UPDATE local_table SET y = 2 WHERE x = 1;
 <waiting ...>
step dist13-update:
UPDATE local_table SET y = 3 WHERE x = 1;
 <waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_node_id,
waiting_transaction_num,
@@ -131,16 +131,16 @@ step detector-dump-wait-edges:
waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting

0 11 1 f
13 1 0 t
step dist11-abort:
ABORT;

step local-update: <... completed>
step local-abort:
ABORT;

step dist13-update: <... completed>
step dist13-abort:
ABORT;

File diff suppressed because it is too large
|
@@ -1,35 +1,35 @@
Parsed test spec with 2 sessions

starting permutation: s1-begin s1-add-node-1 s2-create-extension-version-11 s1-commit s1-print
step s1-begin:
BEGIN;

step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);

?column?

1
step s2-create-extension-version-11:
CREATE extension seg VERSION "1.1";
 <waiting ...>
step s1-commit:
COMMIT;

step s2-create-extension-version-11: <... completed>
step s1-print:
select count(*) from citus.pg_dist_object ;
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);

count

1
extname extversion nspname

seg 1.1 public
run_command_on_workers

(localhost,57637,t,seg)
@ -44,39 +44,39 @@ run_command_on_workers
|
|||
(localhost,57638,t,public)
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-alter-extension-update-to-version-12 s1-commit s1-print
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-alter-extension-update-to-version-12:
|
||||
1
|
||||
step s2-alter-extension-update-to-version-12:
|
||||
ALTER extension seg update to "1.2";
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-alter-extension-update-to-version-12: <... completed>
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
1
|
||||
extname extversion nspname
|
||||
1
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.2 public
|
||||
seg 1.2 public
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57637,t,seg)
|
||||
|
@ -91,43 +91,43 @@ run_command_on_workers
|
|||
(localhost,57638,t,public)
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-drop-extension s1-commit s1-print
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-begin:
|
||||
1
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-remove-node-1:
|
||||
step s1-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-drop-extension:
|
||||
1
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-extension: <... completed>
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
0
|
||||
extname extversion nspname
|
||||
0
|
||||
extname extversion nspname
|
||||
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -140,38 +140,38 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-create-extension-with-schema1 s1-commit s1-print
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-create-extension-with-schema1:
|
||||
1
|
||||
step s2-create-extension-with-schema1:
|
||||
CREATE extension seg with schema schema1;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-create-extension-with-schema1: <... completed>
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
2
|
||||
extname extversion nspname
|
||||
2
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.3 schema1
|
||||
seg 1.3 schema1
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57637,t,seg)
|
||||
|
@ -186,37 +186,37 @@ run_command_on_workers
|
|||
(localhost,57638,t,schema1)
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-add-node-1 s2-drop-extension s1-commit s1-print
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-drop-extension:
|
||||
1
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-drop-extension: <... completed>
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
1
|
||||
extname extversion nspname
|
||||
1
|
||||
extname extversion nspname
|
||||
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -232,48 +232,48 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-add-node-1 s1-create-extension-with-schema2 s1-begin s1-remove-node-1 s2-alter-extension-set-schema3 s1-commit s1-print
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-create-extension-with-schema2:
|
||||
1
|
||||
step s1-create-extension-with-schema2:
|
||||
CREATE extension seg with schema schema2;
|
||||
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-remove-node-1:
|
||||
step s1-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-alter-extension-set-schema3:
|
||||
1
|
||||
step s2-alter-extension-set-schema3:
|
||||
alter extension seg set schema schema3;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-alter-extension-set-schema3: <... completed>
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
extname extversion nspname
|
||||
4
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.3 schema3
|
||||
seg 1.3 schema3
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57638,t,seg)
|
||||
|
@ -285,47 +285,47 @@ run_command_on_workers
|
|||
(localhost,57638,t,schema3)
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s1-add-node-1 s2-drop-extension s1-begin s1-remove-node-1 s2-create-extension-with-schema1 s1-commit s1-print
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-drop-extension:
|
||||
1
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s1-begin:
|
||||
step s1-begin:
|
||||
BEGIN;
|
||||
|
||||
step s1-remove-node-1:
|
||||
step s1-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-create-extension-with-schema1:
|
||||
1
|
||||
step s2-create-extension-with-schema1:
|
||||
CREATE extension seg with schema schema1;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-create-extension-with-schema1: <... completed>
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
extname extversion nspname
|
||||
4
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.3 schema1
|
||||
seg 1.3 schema1
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57638,t,seg)
|
||||
|
@ -337,52 +337,52 @@ run_command_on_workers
|
|||
(localhost,57638,t,schema1)
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-add-node-1 s2-drop-extension s2-remove-node-1 s2-begin s2-create-extension-version-11 s1-add-node-1 s2-commit s1-print
|
||||
step s2-add-node-1:
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-drop-extension:
|
||||
1
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s2-remove-node-1:
|
||||
step s2-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-begin:
|
||||
1
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-create-extension-version-11:
|
||||
step s2-create-extension-version-11:
|
||||
CREATE extension seg VERSION "1.1";
|
||||
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-commit:
|
||||
1
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
3
|
||||
extname extversion nspname
|
||||
3
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.1 public
|
||||
seg 1.1 public
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57637,t,"")
|
||||
|
@ -397,57 +397,57 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-version-11 s2-remove-node-1 s2-begin s2-alter-extension-update-to-version-12 s1-add-node-1 s2-commit s1-print
|
||||
step s2-drop-extension:
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s2-add-node-1:
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-create-extension-version-11:
|
||||
1
|
||||
step s2-create-extension-version-11:
|
||||
CREATE extension seg VERSION "1.1";
|
||||
|
||||
step s2-remove-node-1:
|
||||
step s2-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-begin:
|
||||
1
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-alter-extension-update-to-version-12:
|
||||
step s2-alter-extension-update-to-version-12:
|
||||
ALTER extension seg update to "1.2";
|
||||
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-add-node-1: <... completed>
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-print:
|
||||
1
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
extname extversion nspname
|
||||
4
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.2 public
|
||||
seg 1.2 public
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57637,t,seg)
|
||||
|
@ -462,43 +462,43 @@ run_command_on_workers
|
|||
(localhost,57638,t,public)
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-add-node-1 s2-begin s2-drop-extension s1-remove-node-1 s2-commit s1-print
|
||||
step s2-add-node-1:
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-begin:
|
||||
1
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-drop-extension:
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s1-remove-node-1:
|
||||
step s1-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-remove-node-1: <... completed>
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-print:
|
||||
1
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
3
|
||||
extname extversion nspname
|
||||
3
|
||||
extname extversion nspname
|
||||
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -511,37 +511,37 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-begin s2-create-extension-with-schema1 s1-add-node-1 s2-commit s1-print
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-create-extension-with-schema1:
|
||||
step s2-create-extension-with-schema1:
|
||||
CREATE extension seg with schema schema1;
|
||||
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-commit:
|
||||
1
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
3
|
||||
extname extversion nspname
|
||||
3
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.3 schema1
|
||||
seg 1.3 schema1
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57637,t,"")
|
||||
|
@ -556,51 +556,51 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-with-schema2 s2-begin s2-alter-extension-version-13 s1-remove-node-1 s2-commit s1-print
|
||||
step s2-drop-extension:
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s2-add-node-1:
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-create-extension-with-schema2:
|
||||
1
|
||||
step s2-create-extension-with-schema2:
|
||||
CREATE extension seg with schema schema2;
|
||||
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-alter-extension-version-13:
|
||||
step s2-alter-extension-version-13:
|
||||
ALTER extension seg update to "1.3";
|
||||
|
||||
step s1-remove-node-1:
|
||||
step s1-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-remove-node-1: <... completed>
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-print:
|
||||
1
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
4
|
||||
extname extversion nspname
|
||||
4
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.3 schema2
|
||||
seg 1.3 schema2
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57638,t,seg)
|
||||
|
@ -612,46 +612,46 @@ run_command_on_workers
|
|||
(localhost,57638,t,schema2)
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-drop-extension s2-add-node-1 s2-begin s2-create-extension-version-11 s1-remove-node-1 s2-commit s1-print
|
||||
step s2-drop-extension:
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s2-add-node-1:
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-begin:
|
||||
1
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-create-extension-version-11:
|
||||
step s2-create-extension-version-11:
|
||||
CREATE extension seg VERSION "1.1";
|
||||
|
||||
step s1-remove-node-1:
|
||||
step s1-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-commit:
|
||||
1
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-print:
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
3
|
||||
extname extversion nspname
|
||||
3
|
||||
extname extversion nspname
|
||||
|
||||
seg 1.1 public
|
||||
seg 1.1 public
|
||||
run_command_on_workers
|
||||
|
||||
(localhost,57638,t,"")
|
||||
|
@ -663,54 +663,54 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
starting permutation: s2-drop-extension s2-add-node-1 s2-create-extension-version-11 s2-remove-node-1 s2-begin s2-drop-extension s1-add-node-1 s2-commit s1-print
|
||||
step s2-drop-extension:
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s2-add-node-1:
|
||||
step s2-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-create-extension-version-11:
|
||||
1
|
||||
step s2-create-extension-version-11:
|
||||
CREATE extension seg VERSION "1.1";
|
||||
|
||||
step s2-remove-node-1:
|
||||
step s2-remove-node-1:
|
||||
SELECT 1 FROM master_remove_node('localhost', 57637);
|
||||
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s2-begin:
|
||||
1
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-drop-extension:
|
||||
step s2-drop-extension:
|
||||
drop extension seg;
|
||||
|
||||
step s1-add-node-1:
|
||||
step s1-add-node-1:
|
||||
SELECT 1 FROM master_add_node('localhost', 57637);
|
||||
<waiting ...>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
step s1-add-node-1: <... completed>
|
||||
?column?
|
||||
?column?
|
||||
|
||||
1
|
||||
step s1-print:
|
||||
1
|
||||
step s1-print:
|
||||
select count(*) from citus.pg_dist_object ;
|
||||
select extname, extversion, nspname from pg_extension, pg_namespace where pg_namespace.oid=pg_extension.extnamespace and extname='seg';
|
||||
SELECT run_command_on_workers($$select extname from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select extversion from pg_extension where extname='seg'$$);
|
||||
SELECT run_command_on_workers($$select nspname from pg_extension, pg_namespace where extname='seg' and pg_extension.extnamespace=pg_namespace.oid$$);
|
||||
|
||||
count
|
||||
count
|
||||
|
||||
3
|
||||
extname extversion nspname
|
||||
3
|
||||
extname extversion nspname
|
||||
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -726,5 +726,5 @@ run_command_on_workers
|
|||
(localhost,57638,t,"")
|
||||
master_remove_node
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
@@ -5,79 +5,79 @@ run_command_on_workers
(localhost,57637,t,"GRANT ROLE")
(localhost,57638,t,"GRANT ROLE")
step s1-grant:
GRANT ALL ON test_table TO test_user_1;
SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1');
GRANT ALL ON test_table TO test_user_2;
SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2');

bool_and

t
bool_and

t
step s1-begin-insert:
BEGIN;
SET ROLE test_user_1;
INSERT INTO test_table VALUES (100, 100);

step s2-begin-insert:
BEGIN;
SET ROLE test_user_2;
INSERT INTO test_table VALUES (200, 200);

step s3-as-admin:
-- Admin should be able to see all transactions
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();

count

2
count

4
step s3-as-user-1:
-- User should only be able to see its own transactions
SET ROLE test_user_1;
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();

count

1
count

2
step s3-as-readonly:
-- Other user should not see transactions
SET ROLE test_readonly;
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();

count

0
count

0
step s3-as-monitor:
-- Monitor should see all transactions
SET ROLE test_monitor;
SELECT count(*) FROM get_all_active_transactions();
SELECT count(*) FROM get_global_active_transactions();

count

2
count

4
step s1-commit:
COMMIT;

step s2-commit:
COMMIT;

run_command_on_workers
File diff suppressed because it is too large
|
@@ -3,139 +3,139 @@ Parsed test spec with 2 sessions

starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
create_distributed_table


step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
count

15

starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
create_distributed_table


step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-router-select: SELECT * FROM hash_copy WHERE id = 1;
id data int_data

1 b 1
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
count

10

starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2;
|
||||
id data int_data
|
||||
id data int_data
|
||||
|
||||
0 a 0
|
||||
1 b 1
|
||||
2 c 2
|
||||
3 d 3
|
||||
4 e 4
|
||||
0 a 0
|
||||
1 b 1
|
||||
2 c 2
|
||||
3 d 3
|
||||
4 e 4
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-task-tracker-select:
|
||||
step s2-task-tracker-select:
|
||||
SET citus.task_executor_type TO "task-tracker";
|
||||
SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
|
||||
|
||||
id data int_data id data int_data
|
||||
id data int_data id data int_data
|
||||
|
||||
0 a 0 0 a 0
|
||||
1 b 1 1 b 1
|
||||
2 c 2 2 c 2
|
||||
3 d 3 3 d 3
|
||||
4 e 4 4 e 4
|
||||
0 a 0 0 a 0
|
||||
1 b 1 1 b 1
|
||||
2 c 2 2 c 2
|
||||
3 d 3 3 d 3
|
||||
4 e 4 4 e 4
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-insert: INSERT INTO hash_copy VALUES(0, 'k', 0);
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
11
|
||||
11
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
15
|
||||
15
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-update: UPDATE hash_copy SET data = 'l' WHERE id = 0;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-delete: DELETE FROM hash_copy WHERE id = 1;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
9
|
||||
9
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -143,14 +143,14 @@ step s2-truncate: TRUNCATE hash_copy; <waiting ...>
|
|||
step s1-commit: COMMIT;
|
||||
step s2-truncate: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -163,7 +163,7 @@ ERROR: relation "hash_copy" does not exist
|
|||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -171,9 +171,9 @@ step s2-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); <waitin
|
|||
step s1-commit: COMMIT;
|
||||
step s2-ddl-create-index: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -183,7 +183,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id);
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -192,9 +192,9 @@ step s2-ddl-drop-index: DROP INDEX hash_copy_index; <waiting ...>
|
|||
step s1-commit: COMMIT;
|
||||
step s2-ddl-drop-index: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -204,7 +204,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -212,9 +212,9 @@ step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY hash_copy_index
|
|||
step s1-commit: COMMIT;
|
||||
step s2-ddl-create-index-concurrently: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -224,7 +224,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -232,9 +232,9 @@ step s2-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; <wai
|
|||
step s1-commit: COMMIT;
|
||||
step s2-ddl-add-column: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -244,7 +244,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0;
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -253,9 +253,9 @@ step s2-ddl-drop-column: ALTER TABLE hash_copy DROP new_column; <waiting ...>
|
|||
step s1-commit: COMMIT;
|
||||
step s2-ddl-drop-column: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -265,7 +265,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -273,9 +273,9 @@ step s2-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column; <wai
|
|||
step s1-commit: COMMIT;
|
||||
step s2-ddl-rename-column: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -285,38 +285,38 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-table-size: SELECT citus_total_relation_size('hash_copy');
|
||||
citus_total_relation_size
|
||||
|
||||
65536
|
||||
65536
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-master-modify-multiple-shards: DELETE FROM hash_copy;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
5
|
||||
5
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -325,16 +325,16 @@ step s1-commit: COMMIT;
|
|||
step s2-master-drop-all-shards: <... completed>
|
||||
master_drop_all_shards
|
||||
|
||||
4
|
||||
4
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE hash_copy;
|
||||
step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
|
@ -345,17 +345,17 @@ step s1-commit: COMMIT;
|
|||
step s2-distribute-table: <... completed>
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
|
||||
15
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
|
||||
15
|
||||
|
||||
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
DROP TABLE hash_copy;
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE hash_copy(id integer, data text, int_data int);
|
||||
|
@ -363,7 +363,7 @@ step s1-recreate-with-replication-2:
|
|||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -371,15 +371,15 @@ step s2-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; <waiting ...>
|
|||
step s1-commit: COMMIT;
|
||||
step s2-update: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
|
||||
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
DROP TABLE hash_copy;
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE hash_copy(id integer, data text, int_data int);
|
||||
|
@ -387,7 +387,7 @@ step s1-recreate-with-replication-2:
|
|||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -395,15 +395,15 @@ step s2-delete: DELETE FROM hash_copy WHERE id = 1; <waiting ...>
|
|||
step s1-commit: COMMIT;
|
||||
step s2-delete: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
9
|
||||
9
|
||||
|
||||
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
DROP TABLE hash_copy;
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE hash_copy(id integer, data text, int_data int);
|
||||
|
@ -411,7 +411,7 @@ step s1-recreate-with-replication-2:
|
|||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -419,15 +419,15 @@ step s2-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; <waiting .
|
|||
step s1-commit: COMMIT;
|
||||
step s2-insert-select: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
20
|
||||
20
|
||||
|
||||
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
|
||||
step s1-recreate-with-replication-2:
|
||||
DROP TABLE hash_copy;
|
||||
SET citus.shard_replication_factor TO 2;
|
||||
CREATE TABLE hash_copy(id integer, data text, int_data int);
|
||||
|
@ -435,7 +435,7 @@ step s1-recreate-with-replication-2:
|
|||
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
|
@ -443,132 +443,132 @@ step s2-master-modify-multiple-shards: DELETE FROM hash_copy; <waiting ...>
|
|||
step s1-commit: COMMIT;
|
||||
step s2-master-modify-multiple-shards: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
create_distributed_table


step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-router-select: SELECT * FROM hash_copy WHERE id = 1;
id data int_data
id data int_data

1 b 1
1 b 1
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
count
count

10
10
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
create_distributed_table


step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2;
id data int_data
id data int_data

0 a 0
1 b 1
2 c 2
3 d 3
4 e 4
0 a 0
1 b 1
2 c 2
3 d 3
4 e 4
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
count
count

10
10
starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count
create_distributed_table


step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-task-tracker-select:
step s1-task-tracker-select:
SET citus.task_executor_type TO "task-tracker";
SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;

id data int_data id data int_data
id data int_data id data int_data

0 a 0 0 a 0
1 b 1 1 b 1
2 c 2 2 c 2
3 d 3 3 d 3
4 e 4 4 e 4
0 a 0 0 a 0
1 b 1 1 b 1
2 c 2 2 c 2
3 d 3 3 d 3
4 e 4 4 e 4
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
count
count

10
10
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-insert: INSERT INTO hash_copy VALUES(0, 'k', 0);
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
11
|
||||
11
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy;
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
15
|
||||
15
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-update: UPDATE hash_copy SET data = 'l' WHERE id = 0;
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-delete: DELETE FROM hash_copy WHERE id = 1;
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
9
|
||||
9
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-truncate: TRUNCATE hash_copy;
|
||||
|
@ -576,14 +576,14 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
|
|||
step s1-commit: COMMIT;
|
||||
step s2-copy: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
5
|
||||
5
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-drop: DROP TABLE hash_copy;
|
||||
|
@ -597,7 +597,7 @@ ERROR: relation "hash_copy" does not exist
|
|||
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id);
|
||||
|
@ -605,9 +605,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
|
|||
step s1-commit: COMMIT;
|
||||
step s2-copy: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -617,7 +617,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id);
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -626,9 +626,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
|
|||
step s1-commit: COMMIT;
|
||||
step s2-copy: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -638,7 +638,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0;
|
||||
|
@ -647,9 +647,9 @@ step s1-commit: COMMIT;
|
|||
step s2-copy: <... completed>
|
||||
error in steps s1-commit s2-copy: ERROR: missing data for column "new_column"
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
5
|
||||
5
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -659,7 +659,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0;
|
||||
step s1-begin: BEGIN;
|
||||
|
@ -668,9 +668,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
|
|||
step s1-commit: COMMIT;
|
||||
step s2-copy: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -680,7 +680,7 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column;
|
||||
|
@ -688,9 +688,9 @@ step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo
|
|||
step s1-commit: COMMIT;
|
||||
step s2-copy: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
|
||||
|
@ -700,57 +700,57 @@ run_command_on_workers
|
|||
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-table-size: SELECT citus_total_relation_size('hash_copy');
|
||||
citus_total_relation_size
|
||||
|
||||
57344
|
||||
57344
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
10
|
||||
10
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-modify-multiple-shards: DELETE FROM hash_copy;
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
5
|
||||
5
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-master-drop-all-shards: SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy');
|
||||
master_drop_all_shards
|
||||
|
||||
4
|
||||
4
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-copy: <... completed>
|
||||
error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
0
|
||||
0
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s1-drop: DROP TABLE hash_copy;
|
||||
step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
|
@ -758,11 +758,11 @@ step s1-begin: BEGIN;
|
|||
step s1-distribute-table: SELECT create_distributed_table('hash_copy', 'id');
|
||||
create_distributed_table
|
||||
|
||||
|
||||
|
||||
step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-copy: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
|
||||
count
|
||||
count
|
||||
|
||||
15
|
||||
15
|
||||
|
|
|
@ -3,299 +3,299 @@ Parsed test spec with 2 sessions
|
|||
starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-update s1-commit s2-commit
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-begin:
|
||||
|
||||
step s1-begin:
|
||||
SET citus.shard_replication_factor to 1;
|
||||
BEGIN;
|
||||
|
||||
step s1-insert-into-select-conflict-update:
|
||||
step s1-insert-into-select-conflict-update:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
|
||||
col_1 col_2
|
||||
col_1 col_2
|
||||
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin:
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-update:
|
||||
step s2-update:
|
||||
UPDATE target_table SET col_2 = 5;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-update: <... completed>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-delete s1-commit s2-commit
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-begin:
|
||||
|
||||
step s1-begin:
|
||||
SET citus.shard_replication_factor to 1;
|
||||
BEGIN;
|
||||
|
||||
step s1-insert-into-select-conflict-do-nothing:
|
||||
step s1-insert-into-select-conflict-do-nothing:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT DO NOTHING;
|
||||
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-delete:
|
||||
step s2-delete:
|
||||
DELETE FROM target_table;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-delete: <... completed>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-begin:
|
||||
|
||||
step s1-begin:
|
||||
SET citus.shard_replication_factor to 1;
|
||||
BEGIN;
|
||||
|
||||
step s1-insert-into-select-conflict-do-nothing:
|
||||
step s1-insert-into-select-conflict-do-nothing:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT DO NOTHING;
|
||||
|
||||
step s2-begin:
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-insert-into-select-conflict-update:
|
||||
step s2-insert-into-select-conflict-update:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-insert-into-select-conflict-update: <... completed>
|
||||
col_1 col_2
|
||||
col_1 col_2
|
||||
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-commit:
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-begin:
|
||||
|
||||
step s1-begin:
|
||||
SET citus.shard_replication_factor to 1;
|
||||
BEGIN;
|
||||
|
||||
step s1-insert-into-select-conflict-update:
|
||||
step s1-insert-into-select-conflict-update:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
|
||||
col_1 col_2
|
||||
col_1 col_2
|
||||
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin:
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-insert-into-select-conflict-update:
|
||||
step s2-insert-into-select-conflict-update:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-insert-into-select-conflict-update: <... completed>
|
||||
col_1 col_2
|
||||
col_1 col_2
|
||||
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-commit:
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
|
||||
starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-do-nothing s1-commit s2-commit
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-begin:
|
||||
|
||||
step s1-begin:
|
||||
SET citus.shard_replication_factor to 1;
|
||||
BEGIN;
|
||||
|
||||
step s1-insert-into-select-conflict-update:
|
||||
step s1-insert-into-select-conflict-update:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
|
||||
col_1 col_2
|
||||
col_1 col_2
|
||||
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin:
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin:
|
||||
BEGIN;
|
||||
|
||||
step s2-insert-into-select-conflict-do-nothing:
|
||||
step s2-insert-into-select-conflict-do-nothing:
|
||||
INSERT INTO target_table
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT DO NOTHING;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-insert-into-select-conflict-do-nothing: <... completed>
|
||||
step s2-commit:
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
|
||||
starting permutation: s1-begin-replication-factor-2 s1-insert-into-select-conflict-update-replication-factor-2 s2-begin-replication-factor-2 s2-insert-into-select-conflict-update-replication-factor-2 s1-commit s2-commit
|
||||
create_distributed_table
|
||||
|
||||
|
||||
step s1-begin-replication-factor-2:
|
||||
|
||||
step s1-begin-replication-factor-2:
|
||||
SET citus.shard_replication_factor to 2;
|
||||
BEGIN;
|
||||
|
||||
step s1-insert-into-select-conflict-update-replication-factor-2:
|
||||
step s1-insert-into-select-conflict-update-replication-factor-2:
|
||||
INSERT INTO target_table_2
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
|
||||
col_1 col_2 col_3
|
||||
col_1 col_2 col_3
|
||||
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin-replication-factor-2:
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-begin-replication-factor-2:
|
||||
SET citus.shard_replication_factor to 2;
|
||||
BEGIN;
|
||||
|
||||
step s2-insert-into-select-conflict-update-replication-factor-2:
|
||||
step s2-insert-into-select-conflict-update-replication-factor-2:
|
||||
INSERT INTO target_table_2
|
||||
SELECT
|
||||
SELECT
|
||||
col_1, col_2
|
||||
FROM (
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
SELECT
|
||||
col_1, col_2, col_3
|
||||
FROM
|
||||
source_table
|
||||
LIMIT 5
|
||||
) as foo
|
||||
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
|
||||
<waiting ...>
|
||||
step s1-commit:
|
||||
step s1-commit:
|
||||
COMMIT;
|
||||
|
||||
step s2-insert-into-select-conflict-update-replication-factor-2: <... completed>
|
||||
col_1 col_2 col_3
|
||||
col_1 col_2 col_3
|
||||
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-commit:
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
4 4
|
||||
5 5
|
||||
step s2-commit:
|
||||
COMMIT;
|
||||
|
||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff