Adds changes to expected files

connection-string-tests-9.2-include
Halil Ozan Akgul 2020-03-16 11:56:59 +03:00
parent 4ced3018fb
commit e54fba6552
201 changed files with 4194 additions and 4194 deletions
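
Throughout the expected regression-test output below, concrete connection details are rewritten into placeholders: localhost becomes <host>, the worker ports 57637/57638 become xxxxx, dbname=regression becomes dbname=<db>, and the postgres role becomes <user>. As a minimal sketch only — assuming a simple line-by-line rewrite and a hypothetical script name normalize_expected.py, not the repository's actual normalization tooling — the substitution visible in these diffs could be expressed like this:

import re
import sys

# Placeholder rules inferred from the expected-output changes in this commit.
# The patterns and this script are an illustrative assumption, not the
# project's real normalization mechanism.
RULES = [
    (re.compile(r"localhost"), "<host>"),               # node host names
    (re.compile(r"\b5763[78]\b"), "xxxxx"),             # worker ports seen in these files
    (re.compile(r"dbname=regression"), "dbname=<db>"),  # connection-string database name
    (re.compile(r"\bpostgres\b"), "<user>"),            # superuser role in ownership columns
]

def normalize(line):
    # Apply every placeholder rule to one line of expected output.
    for pattern, replacement in RULES:
        line = pattern.sub(replacement, line)
    return line

if __name__ == "__main__":
    # Usage sketch: python normalize_expected.py < old.out > new.out
    for raw in sys.stdin:
        sys.stdout.write(normalize(raw))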

View File

@ -2,15 +2,15 @@
+++ file_different.out.modified
@@ -1,3 +1,2 @@
-This line is missing in file_different
Ports are replaced with xxxxx: localhost:2187
Ports are replaced with xxxxx: <host>:2187
This line is the same
@@ -7,6 +6,8 @@
Filler 2, localhost:1111
Filler 3, localhost:111
Filler 2, <host>:1111
Filler 3, <host>:111
-This line is missing in file_different
+This line has been inserted
+This line has also been inserted, localhost:10812
Line below will be removed, localhost:2781
+This line has also been inserted, <host>:10812
Line below will be removed, <host>:2781
-This line will be changed ✓
+This line has been changed ✇
End of file

View File

@ -1,20 +1,20 @@
--
-- ADD_COORDINATOR
--
SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset
SELECT master_add_node('<host>', :master_port, groupid => 0) AS master_nodeid \gset
-- adding the same node again should return the existing nodeid
SELECT master_add_node('localhost', :master_port, groupid => 0) = :master_nodeid;
SELECT master_add_node('<host>', :master_port, groupid => 0) = :master_nodeid;
?column?
---------------------------------------------------------------------
t
(1 row)
-- adding another node with groupid=0 should error out
SELECT master_add_node('localhost', 12345, groupid => 0) = :master_nodeid;
SELECT master_add_node('<host>', 12345, groupid => 0) = :master_nodeid;
ERROR: group 0 already has a primary node
-- start_metadata_sync_to_node() for coordinator should raise a notice
SELECT start_metadata_sync_to_node('localhost', :master_port);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
SELECT start_metadata_sync_to_node('<host>', :master_port);
NOTICE: <host>:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
start_metadata_sync_to_node
---------------------------------------------------------------------

View File

@ -220,7 +220,7 @@ create aggregate sumstring(text) (
);
select sumstring(valf::text) from aggdata where valf is not null;
ERROR: function "aggregate_support.sumstring(text)" does not exist
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
select create_distributed_function('sumstring(text)');
create_distributed_function
---------------------------------------------------------------------
@ -264,8 +264,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
select run_command_on_workers($$create user notsuper$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
grant all on schema aggregate_support to notsuper;
@ -276,8 +276,8 @@ grant all on all tables in schema aggregate_support to notsuper;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,GRANT)
(localhost,57638,t,GRANT)
(<host>,xxxxx,t,GRANT)
(<host>,xxxxx,t,GRANT)
(2 rows)
set role notsuper;
@ -431,6 +431,6 @@ RESET citus.task_executor_type;
select key, count(distinct aggdata)
from aggdata group by key order by 1, 2;
ERROR: type "aggregate_support.aggdata" does not exist
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
set client_min_messages to error;
drop schema aggregate_support cascade;

View File

@ -6,11 +6,11 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE ROLE alter_role_1 WITH LOGIN;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
-- postgres errors out
-- <user> errors out
ALTER ROLE alter_role_1 WITH SUPERUSER NOSUPERUSER;
ERROR: conflicting or redundant options
-- make sure that we propagate all options accurately
@ -24,8 +24,8 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
(localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
(<host>,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
(<host>,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,,2032)")
(2 rows)
-- make sure that we propagate all options accurately
@ -39,8 +39,8 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
(localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
(<host>,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
(<host>,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,,2052)")
(2 rows)
-- make sure that non-existent users are handled properly
@ -59,8 +59,8 @@ SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;
SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = CURRENT_USER;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,123)
(localhost,57638,t,123)
(<host>,xxxxx,t,123)
(<host>,xxxxx,t,123)
(2 rows)
-- make sure that SESSION_USER just works fine
@ -74,8 +74,8 @@ SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;
SELECT run_command_on_workers($$SELECT rolconnlimit FROM pg_authid WHERE rolname = SESSION_USER;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,124)
(localhost,57638,t,124)
(<host>,xxxxx,t,124)
(<host>,xxxxx,t,124)
(2 rows)
-- now lets test the passwords in more detail
@ -89,8 +89,8 @@ SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1';
SELECT run_command_on_workers($$SELECT rolpassword is NULL FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(<host>,xxxxx,t,t)
(<host>,xxxxx,t,t)
(2 rows)
ALTER ROLE alter_role_1 WITH PASSWORD 'test1';
@ -103,8 +103,8 @@ SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
(localhost,57638,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
(<host>,xxxxx,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
(<host>,xxxxx,t,md52f9cc8d65e37edcc45c4a489bdfc699d)
(2 rows)
ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'test2';
@ -117,8 +117,8 @@ SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
(localhost,57638,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
(<host>,xxxxx,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
(<host>,xxxxx,t,md5e17f7818c5ec023fa87bdb97fd3e842e)
(2 rows)
ALTER ROLE alter_role_1 WITH ENCRYPTED PASSWORD 'md59cce240038b7b335c6aa9674a6f13e72';
@ -131,8 +131,8 @@ SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1';
SELECT run_command_on_workers($$SELECT rolpassword FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,md59cce240038b7b335c6aa9674a6f13e72)
(localhost,57638,t,md59cce240038b7b335c6aa9674a6f13e72)
(<host>,xxxxx,t,md59cce240038b7b335c6aa9674a6f13e72)
(<host>,xxxxx,t,md59cce240038b7b335c6aa9674a6f13e72)
(2 rows)
-- edge case role names
@ -142,8 +142,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE ROLE "alter_role'1" WITH LOGIN;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
ALTER ROLE "alter_role'1" CREATEROLE;
@ -156,8 +156,8 @@ SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1';
SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role''1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(<host>,xxxxx,t,t)
(<host>,xxxxx,t,t)
(2 rows)
CREATE ROLE "alter_role""1" WITH LOGIN;
@ -166,8 +166,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE ROLE "alter_role""1" WITH LOGIN;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
ALTER ROLE "alter_role""1" CREATEROLE;
@ -180,8 +180,8 @@ SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1';
SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolname = 'alter_role"1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,t)
(localhost,57638,t,t)
(<host>,xxxxx,t,t)
(<host>,xxxxx,t,t)
(2 rows)
-- add node
@ -195,11 +195,11 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
(localhost,57638,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
(<host>,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
(<host>,xxxxx,t,"(alter_role_1,t,t,t,t,t,t,t,66,md5ead5c53df946838b1291bba7757f41a7,2032)")
(2 rows)
SELECT master_remove_node('localhost', :worker_1_port);
SELECT master_remove_node('<host>', :worker_1_port);
master_remove_node
---------------------------------------------------------------------
@ -215,10 +215,10 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
(<host>,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
(1 row)
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('<host>', :worker_1_port);
?column?
---------------------------------------------------------------------
1
@ -233,8 +233,8 @@ SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlog
SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
(localhost,57638,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
(<host>,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
(<host>,xxxxx,t,"(alter_role_1,f,f,f,f,f,f,f,0,md5be308f25c7b1a2d50c85cf7e6f074df9,2052)")
(2 rows)
-- table belongs to a role

View File

@ -1,13 +1,13 @@
--
-- Setup MX data syncing
--
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('<host>', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_port);
start_metadata_sync_to_node
---------------------------------------------------------------------

View File

@ -35,7 +35,7 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
-> Distributed Subplan XXX_2
@ -44,13 +44,13 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: stock.s_i_id
InitPlan 1 (returns $0)
@ -81,13 +81,13 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: stock.s_i_id
-> Seq Scan on stock_1640000 stock
@ -112,13 +112,13 @@ having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from st
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: stock.s_i_id
-> Seq Scan on stock_1640000 stock
@ -142,7 +142,7 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate (cost=40.60..42.60 rows=200 width=12)
Group Key: s.s_i_id
-> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)
@ -163,7 +163,7 @@ having (select true);
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate (cost=40.60..42.60 rows=200 width=12)
Group Key: s.s_i_id
-> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)

View File

@ -40,7 +40,7 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
-> Distributed Subplan XXX_2
@ -49,13 +49,13 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: stock.s_i_id
InitPlan 1 (returns $0)
@ -86,13 +86,13 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: stock.s_i_id
-> Seq Scan on stock_1640000 stock
@ -117,13 +117,13 @@ having sum(s_order_cnt) > (select max(s_order_cnt) - 3 as having_query from st
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on stock_1640000 stock
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: stock.s_i_id
-> Seq Scan on stock_1640000 stock
@ -147,7 +147,7 @@ order by s_i_id;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate (cost=40.60..42.60 rows=200 width=12)
Group Key: s.s_i_id
-> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)
@ -168,7 +168,7 @@ having (select true);
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate (cost=40.60..42.60 rows=200 width=12)
Group Key: s.s_i_id
-> Seq Scan on stock_1640000 s (cost=0.00..30.40 rows=2040 width=8)

View File

@ -4,14 +4,14 @@ SET search_path TO coordinator_shouldhaveshards;
SET citus.next_shard_id TO 1503000;
-- idempotently add node to allow this test to run without add_coordinator
SET client_min_messages TO WARNING;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
SELECT 1 FROM master_add_node('<host>', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true);
SELECT 1 FROM master_set_node_property('<host>', :master_port, 'shouldhaveshards', true);
?column?
---------------------------------------------------------------------
1
@ -47,7 +47,7 @@ NOTICE: executing the command locally: SELECT y FROM coordinator_shouldhaveshar
1
(1 row)
-- multi-shard queries connect to localhost
-- multi-shard queries connect to <host>
SELECT count(*) FROM test;
count
---------------------------------------------------------------------
@ -123,7 +123,7 @@ END;
DELETE FROM test;
DROP TABLE test;
DROP SCHEMA coordinator_shouldhaveshards CASCADE;
SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', false);
SELECT 1 FROM master_set_node_property('<host>', :master_port, 'shouldhaveshards', false);
?column?
---------------------------------------------------------------------
1

View File

@ -375,7 +375,7 @@ DETAIL: distribution column value: 1
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on test_table_1960000 test_table
Filter: (key = 1)
@ -402,12 +402,12 @@ DEBUG: Plan is router executable
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on test_table_1960000 test_table
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Function Scan on read_intermediate_result intermediate_result
Filter: (key = 1)
@ -484,12 +484,12 @@ DEBUG: Plan is router executable
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on test_table_1960000 test_table
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Merge Join
Merge Cond: (intermediate_result.key = intermediate_result_1.key)
@ -530,7 +530,7 @@ DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Hash Join
Hash Cond: (test_table.key = test_table_1.key)
@ -833,7 +833,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
1021
(1 row)
-- the CTEs are very simple, so postgres
-- the CTEs are very simple, so <user>
-- can pull-up the subqueries after inlining
-- the CTEs, and the query that we send to workers
-- becomes a join between two tables

View File

@ -342,7 +342,7 @@ DEBUG: Router planner cannot handle multi-shard select queries
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Seq Scan on test_table_1960000 test_table
Filter: (key = 1)
@ -409,12 +409,12 @@ DEBUG: Plan is router executable
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on test_table_1960000 test_table
Task Count: 1
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Aggregate
-> Merge Join
Merge Cond: (intermediate_result.key = intermediate_result_1.key)
@ -723,7 +723,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
DEBUG: Router planner cannot handle multi-shard select queries
ERROR: cannot pushdown the subquery
DETAIL: Complex subqueries and CTEs cannot be in the outer part of the outer join
-- the CTEs are very simple, so postgres
-- the CTEs are very simple, so <user>
-- can pull-up the subqueries after inlining
-- the CTEs, and the query that we send to workers
-- becomes a join between two tables

View File

@ -115,22 +115,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -149,22 +149,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -184,22 +184,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -218,22 +218,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -253,22 +253,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -287,22 +287,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -322,22 +322,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -357,25 +357,25 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)

View File

@ -117,22 +117,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -155,22 +155,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -192,22 +192,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -230,22 +230,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -267,22 +267,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -305,22 +305,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -342,22 +342,22 @@ GROUP BY(1);
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
@ -382,7 +382,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
Task Count: 4
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> GroupAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
@ -390,7 +390,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
Sort Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> GroupAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
@ -398,7 +398,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
Sort Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> GroupAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)
@ -406,7 +406,7 @@ HAVING hll_cardinality(hll_union_agg(unique_users)) > 1;
Sort Key: day
-> Seq Scan on daily_uniques_xxxxxxx daily_uniques
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> GroupAggregate
Group Key: day
Filter: (hll_cardinality(hll_union_agg(unique_users)) > '1'::double precision)

View File

@ -19,7 +19,7 @@ CREATE TYPE tt1 AS (a int , b int);
CREATE TABLE t2 (a int PRIMARY KEY, b tt1);
SELECT create_distributed_table('t2', 'a');
ERROR: type "disabled_object_propagation.tt1" does not exist
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT 1 FROM run_command_on_workers($$
BEGIN;
SET LOCAL citus.enable_ddl_propagation TO off;
@ -43,7 +43,7 @@ CREATE TYPE tt2 AS ENUM ('a', 'b');
CREATE TABLE t3 (a int PRIMARY KEY, b tt2);
SELECT create_distributed_table('t3', 'a');
ERROR: type "disabled_object_propagation.tt2" does not exist
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT 1 FROM run_command_on_workers($$
BEGIN;
SET LOCAL citus.enable_ddl_propagation TO off;
@ -99,8 +99,8 @@ SELECT row(nspname, typname, usename)
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(disabled_object_propagation,tt3,postgres)")
(localhost,57638,t,"(disabled_object_propagation,tt3,postgres)")
(<host>,xxxxx,t,"(disabled_object_propagation,tt3,<user>)")
(<host>,xxxxx,t,"(disabled_object_propagation,tt3,<user>)")
(2 rows)
SELECT run_command_on_workers($$
@ -114,8 +114,8 @@ GROUP BY pg_type.typname;
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(tt3,""a int4, b int4"")")
(localhost,57638,t,"(tt3,""a int4, b int4"")")
(<host>,xxxxx,t,"(tt3,""a int4, b int4"")")
(<host>,xxxxx,t,"(tt3,""a int4, b int4"")")
(2 rows)
-- suppress any warnings during cleanup

View File

@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE USER collationuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
CREATE SCHEMA collation_tests AUTHORIZATION collationuser;
@ -25,7 +25,7 @@ WHERE collname like 'german_phonebook%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
german_phonebook | collation_tests | postgres
german_phonebook | collation_tests | <user>
(1 row)
\c - - - :master_port
@ -90,8 +90,8 @@ WHERE collname like 'german_phonebook%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
german_phonebook | collation_tests | postgres
german_phonebook_unpropagated | collation_tests | postgres
german_phonebook | collation_tests | <user>
german_phonebook_unpropagated | collation_tests | <user>
(2 rows)
\c - - - :master_port
@ -108,7 +108,7 @@ ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
german_phonebook2 | collation_tests2 | collationuser
german_phonebook_unpropagated | collation_tests | postgres
german_phonebook_unpropagated | collation_tests | <user>
(2 rows)
\c - - - :master_port
@ -129,7 +129,7 @@ DROP USER collationuser;
SELECT run_command_on_workers($$DROP USER collationuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(2 rows)

View File

@ -2,8 +2,8 @@ CREATE SCHEMA collation_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
(<host>,xxxxx,t,"CREATE SCHEMA")
(<host>,xxxxx,t,"CREATE SCHEMA")
(2 rows)
\c - - - :worker_1_port
@ -34,7 +34,7 @@ WHERE collname like 'caseinsensitive%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
caseinsensitive | collation_conflict | postgres
caseinsensitive | collation_conflict | <user>
(1 row)
\c - - - :master_port
@ -72,8 +72,8 @@ WHERE collname like 'caseinsensitive%'
ORDER BY 1,2,3;
collname | nspname | rolname
---------------------------------------------------------------------
caseinsensitive | collation_conflict | postgres
caseinsensitive(citus_backup_0) | collation_conflict | postgres
caseinsensitive | collation_conflict | <user>
caseinsensitive(citus_backup_0) | collation_conflict | <user>
(2 rows)
\c - - - :master_port

View File

@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE USER functionuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
CREATE SCHEMA function_tests AUTHORIZATION functionuser;
@ -24,7 +24,7 @@ CREATE FUNCTION eq8(macaddr8, macaddr8) RETURNS bool
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
-- $function$ is what postgres escapes functions with when deparsing
-- $function$ is what <user> escapes functions with when deparsing
-- make sure $function$ doesn't cause invalid syntax
CREATE FUNCTION add_text(text, text) RETURNS text
AS 'select $function$test$function$ || $1::int || $2::int;'
@ -97,7 +97,7 @@ CREATE AGGREGATE sum2(int) (
minitcond = '1',
sortop = ">"
);
-- Test VARIADIC, example taken from postgres test suite
-- Test VARIADIC, example taken from <user> test suite
CREATE AGGREGATE my_rank(VARIADIC "any" ORDER BY VARIADIC "any") (
stype = internal,
sfunc = ordered_set_transition_multi,
@ -172,8 +172,8 @@ WHERE objid = 'eq_mi''xed_param_names(macaddr, macaddr)'::regprocedure;
SELECT * FROM run_command_on_workers($$SELECT function_tests."eq_mi'xed_param_names"('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | f
localhost | 57638 | t | f
<host> | xxxxx | t | f
<host> | xxxxx | t | f
(2 rows)
-- make sure that none of the active and primary nodes hasmetadata
@ -210,8 +210,8 @@ SELECT create_distributed_function('dup(macaddr)', '$1', colocate_with := 'strea
SELECT * FROM run_command_on_workers($$SELECT function_tests.dup('0123456789ab');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
localhost | 57638 | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
<host> | xxxxx | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
<host> | xxxxx | t | (01:23:45:67:89:ab,"01:23:45:67:89:ab is text")
(2 rows)
SELECT create_distributed_function('eq(macaddr,macaddr)', '$1', colocate_with := 'streaming_table');
@ -223,8 +223,8 @@ SELECT create_distributed_function('eq(macaddr,macaddr)', '$1', colocate_with :=
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('012345689ab','0123456789ab');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | f
localhost | 57638 | t | f
<host> | xxxxx | t | f
<host> | xxxxx | t | f
(2 rows)
SELECT public.verify_function_is_same_on_workers('function_tests.eq(macaddr,macaddr)');
@ -366,15 +366,15 @@ SELECT public.verify_function_is_same_on_workers('function_tests.eq2(macaddr,mac
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('012346789ab','012345689ab');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq2('012345689ab','012345689ab');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | t
localhost | 57638 | t | t
<host> | xxxxx | t | t
<host> | xxxxx | t | t
(2 rows)
ALTER ROUTINE eq2(macaddr,macaddr) RENAME TO eq;
@ -382,8 +382,8 @@ ALTER AGGREGATE sum2(int) RENAME TO sum27;
SELECT * FROM run_command_on_workers($$SELECT 1 from pg_proc where proname = 'sum27';$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 1
localhost | 57638 | t | 1
<host> | xxxxx | t | 1
<host> | xxxxx | t | 1
(2 rows)
ALTER AGGREGATE sum27(int) RENAME TO sum2;
@ -407,8 +407,8 @@ WHERE proname IN ('eq', 'sum2', 'my_rank');
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
(localhost,57638,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
(<host>,xxxxx,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
(<host>,xxxxx,t,"{""(functionuser,function_tests,eq)"",""(functionuser,function_tests,my_rank)"",""(functionuser,function_tests,sum2)""}")
(2 rows)
-- change the schema of the function and verify the old schema doesn't exist anymore while
@ -423,15 +423,15 @@ SELECT public.verify_function_is_same_on_workers('function_tests2.eq(macaddr,mac
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
(2 rows)
SELECT * FROM run_command_on_workers($$SELECT function_tests2.eq('012345689ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | f
localhost | 57638 | t | f
<host> | xxxxx | t | f
<host> | xxxxx | t | f
(2 rows)
ALTER ROUTINE function_tests2.eq(macaddr,macaddr) SET SCHEMA function_tests;
@ -451,8 +451,8 @@ SELECT public.verify_function_is_same_on_workers('function_tests.eq(macaddr,maca
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('012345689ab','012345689ab');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | f
localhost | 57638 | t | f
<host> | xxxxx | t | f
<host> | xxxxx | t | f
(2 rows)
-- distributed functions should not be allowed to depend on an extension, also functions
@ -468,8 +468,8 @@ DROP FUNCTION eq(macaddr,macaddr);
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
(2 rows)
-- Test DROP for ROUTINE
@ -489,8 +489,8 @@ DROP ROUTINE eq(macaddr, macaddr);
SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
localhost | 57638 | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
<host> | xxxxx | f | ERROR: function function_tests.eq(unknown, unknown) does not exist
(2 rows)
DROP AGGREGATE function_tests2.sum2(int);
@ -498,11 +498,11 @@ DROP AGGREGATE function_tests2.sum2(int);
SELECT * FROM run_command_on_workers('SELECT function_tests2.sum2(id) FROM (select 1 id, 2) subq;') ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: function function_tests2.sum2(integer) does not exist
localhost | 57638 | f | ERROR: function function_tests2.sum2(integer) does not exist
<host> | xxxxx | f | ERROR: function function_tests2.sum2(integer) does not exist
<host> | xxxxx | f | ERROR: function function_tests2.sum2(integer) does not exist
(2 rows)
-- postgres doesn't accept parameter names in the regprocedure input
-- <user> doesn't accept parameter names in the regprocedure input
SELECT create_distributed_function('eq_with_param_names(val1 macaddr, macaddr)', 'val1');
ERROR: invalid type name "val1 macaddr"
-- invalid distribution_arg_name
@ -557,8 +557,8 @@ ROLLBACK;
SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_with_param_names';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- make sure that none of the active and primary nodes hasmetadata
@ -586,8 +586,8 @@ select bool_and(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'p
SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_with_param_names';$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
(2 rows)
-- valid distribution with distribution_arg_name -- case insensitive
@ -703,7 +703,7 @@ WHERE pg_dist_partition.logicalrelid = 'replicated_table_func_test_4'::regclass
-- function with a macaddr8 dist. arg can be colocated with macaddr
-- column of a distributed table. In general, if there is a coercion
-- path, we rely on postgres for implicit coersions, and users for explicit coersions
-- path, we rely on <user> for implicit coersions, and users for explicit coersions
-- to coerce the values
SELECT create_distributed_function('eq8(macaddr8, macaddr8)', '$1', colocate_with:='replicated_table_func_test_4');
create_distributed_function
@ -837,7 +837,7 @@ DROP USER functionuser;
SELECT run_command_on_workers($$DROP USER functionuser$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(2 rows)

View File

@ -4,8 +4,8 @@ CREATE SCHEMA proc_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
(<host>,xxxxx,t,"CREATE SCHEMA")
(<host>,xxxxx,t,"CREATE SCHEMA")
(2 rows)
\c - - - :worker_1_port

View File

@ -63,19 +63,19 @@ CREATE TABLE distributed_result_info AS
SELECT * FROM distributed_result_info ORDER BY resultId;
resultid | nodeport | rowcount | targetshardid | targetshardindex
---------------------------------------------------------------------
test_from_4213581_to_0 | 57637 | 33 | 4213584 | 0
test_from_4213582_to_0 | 57638 | 16 | 4213584 | 0
test_from_4213582_to_1 | 57638 | 15 | 4213585 | 1
test_from_4213583_to_1 | 57637 | 36 | 4213585 | 1
test_from_4213581_to_0 | xxxxx | 33 | 4213584 | 0
test_from_4213582_to_0 | xxxxx | 16 | 4213584 | 0
test_from_4213582_to_1 | xxxxx | 15 | 4213585 | 1
test_from_4213583_to_1 | xxxxx | 36 | 4213585 | 1
(4 rows)
-- fetch from workers
SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), 'localhost', nodeport) > 0 AS fetched
SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), '<host>', nodeport) > 0 AS fetched
FROM distributed_result_info GROUP BY nodeport ORDER BY nodeport;
nodeport | fetched
---------------------------------------------------------------------
57637 | t
57638 | t
xxxxx | t
xxxxx | t
(2 rows)
-- read all fetched result files
@ -169,31 +169,31 @@ CREATE TABLE distributed_result_info AS
SELECT * FROM distributed_result_info ORDER BY resultId;
resultid | nodeport | rowcount | targetshardid | targetshardindex
---------------------------------------------------------------------
test_from_4213588_to_0 | 57638 | 7 | 4213592 | 0
test_from_4213588_to_1 | 57638 | 6 | 4213593 | 1
test_from_4213588_to_2 | 57638 | 7 | 4213594 | 2
test_from_4213588_to_3 | 57638 | 4 | 4213595 | 3
test_from_4213589_to_0 | 57637 | 7 | 4213592 | 0
test_from_4213589_to_1 | 57637 | 6 | 4213593 | 1
test_from_4213589_to_2 | 57637 | 8 | 4213594 | 2
test_from_4213589_to_3 | 57637 | 4 | 4213595 | 3
test_from_4213590_to_0 | 57638 | 8 | 4213592 | 0
test_from_4213590_to_1 | 57638 | 6 | 4213593 | 1
test_from_4213590_to_2 | 57638 | 8 | 4213594 | 2
test_from_4213590_to_3 | 57638 | 4 | 4213595 | 3
test_from_4213591_to_0 | 57637 | 8 | 4213592 | 0
test_from_4213591_to_1 | 57637 | 6 | 4213593 | 1
test_from_4213591_to_2 | 57637 | 7 | 4213594 | 2
test_from_4213591_to_3 | 57637 | 4 | 4213595 | 3
test_from_4213588_to_0 | xxxxx | 7 | 4213592 | 0
test_from_4213588_to_1 | xxxxx | 6 | 4213593 | 1
test_from_4213588_to_2 | xxxxx | 7 | 4213594 | 2
test_from_4213588_to_3 | xxxxx | 4 | 4213595 | 3
test_from_4213589_to_0 | xxxxx | 7 | 4213592 | 0
test_from_4213589_to_1 | xxxxx | 6 | 4213593 | 1
test_from_4213589_to_2 | xxxxx | 8 | 4213594 | 2
test_from_4213589_to_3 | xxxxx | 4 | 4213595 | 3
test_from_4213590_to_0 | xxxxx | 8 | 4213592 | 0
test_from_4213590_to_1 | xxxxx | 6 | 4213593 | 1
test_from_4213590_to_2 | xxxxx | 8 | 4213594 | 2
test_from_4213590_to_3 | xxxxx | 4 | 4213595 | 3
test_from_4213591_to_0 | xxxxx | 8 | 4213592 | 0
test_from_4213591_to_1 | xxxxx | 6 | 4213593 | 1
test_from_4213591_to_2 | xxxxx | 7 | 4213594 | 2
test_from_4213591_to_3 | xxxxx | 4 | 4213595 | 3
(16 rows)
-- fetch from workers
SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), 'localhost', nodeport) > 0 AS fetched
SELECT nodeport, fetch_intermediate_results((array_agg(resultId)), '<host>', nodeport) > 0 AS fetched
FROM distributed_result_info GROUP BY nodeport ORDER BY nodeport;
nodeport | fetched
---------------------------------------------------------------------
57637 | t
57638 | t
xxxxx | t
xxxxx | t
(2 rows)
-- Read all fetched result files. Sum(x) should be 4550, verified by

View File

@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE USER procedureuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
CREATE SCHEMA procedure_tests AUTHORIZATION procedureuser;
@ -58,8 +58,8 @@ SELECT wait_until_metadata_sync();
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | CALL
localhost | 57638 | t | CALL
<host> | xxxxx | t | CALL
<host> | xxxxx | t | CALL
(2 rows)
SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info(text)');
@ -125,15 +125,15 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests.raise_info2(te
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
<host> | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
<host> | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
(2 rows)
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info2('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | CALL
localhost | 57638 | t | CALL
<host> | xxxxx | t | CALL
<host> | xxxxx | t | CALL
(2 rows)
ALTER PROCEDURE raise_info2(text) RENAME TO raise_info;
@ -154,8 +154,8 @@ WHERE proname = 'raise_info';
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(procedureuser,procedure_tests,raise_info)")
(localhost,57638,t,"(procedureuser,procedure_tests,raise_info)")
(<host>,xxxxx,t,"(procedureuser,procedure_tests,raise_info)")
(<host>,xxxxx,t,"(procedureuser,procedure_tests,raise_info)")
(2 rows)
-- change the schema of the procedure and verify the old schema doesn't exist anymore while
@ -170,15 +170,15 @@ SELECT public.verify_function_is_same_on_workers('procedure_tests2.raise_info(te
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
<host> | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
<host> | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
(2 rows)
SELECT * FROM run_command_on_workers($$CALL procedure_tests2.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | CALL
localhost | 57638 | t | CALL
<host> | xxxxx | t | CALL
<host> | xxxxx | t | CALL
(2 rows)
ALTER PROCEDURE procedure_tests2.raise_info(text) SET SCHEMA procedure_tests;
@ -187,8 +187,8 @@ DROP PROCEDURE raise_info(text);
SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello');$$) ORDER BY 1,2;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
localhost | 57638 | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
<host> | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
<host> | xxxxx | f | ERROR: procedure procedure_tests.raise_info(unknown) does not exist
(2 rows)
SET client_min_messages TO error; -- suppress cascading objects dropping
@ -196,23 +196,23 @@ DROP SCHEMA procedure_tests CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP SCHEMA procedure_tests2 CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP USER procedureuser;
SELECT run_command_on_workers($$DROP USER procedureuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(2 rows)

@ -5,8 +5,8 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
SELECT run_command_on_workers($$CREATE USER typeuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(2 rows)
CREATE SCHEMA type_tests AUTHORIZATION typeuser;
@ -110,8 +110,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'type_tests.te2'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"yes,no")
(localhost,57638,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(2 rows)
-- test some combination of types without ddl propagation, this will prevent the workers
@ -160,8 +160,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'te4' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te4' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(te4,typeuser)")
(localhost,57638,t,"(te4,typeuser)")
(<host>,xxxxx,t,"(te4,typeuser)")
(<host>,xxxxx,t,"(te4,typeuser)")
(2 rows)
ALTER TYPE tc6 OWNER TO typeuser;
@ -174,8 +174,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'tc6' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc6' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(tc6,typeuser)")
(localhost,57638,t,"(tc6,typeuser)")
(<host>,xxxxx,t,"(tc6,typeuser)")
(<host>,xxxxx,t,"(tc6,typeuser)")
(2 rows)
-- create a type as a different user
@ -206,8 +206,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'tc7' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc7' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(tc7,typeuser)")
(localhost,57638,t,"(tc7,typeuser)")
(<host>,xxxxx,t,"(tc7,typeuser)")
(<host>,xxxxx,t,"(tc7,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;
@ -219,8 +219,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'te5' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te5' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(te5,typeuser)")
(localhost,57638,t,"(te5,typeuser)")
(<host>,xxxxx,t,"(te5,typeuser)")
(<host>,xxxxx,t,"(te5,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;
@ -232,8 +232,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'tc8' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'tc8' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(tc8,typeuser)")
(localhost,57638,t,"(tc8,typeuser)")
(<host>,xxxxx,t,"(tc8,typeuser)")
(<host>,xxxxx,t,"(tc8,typeuser)")
(2 rows)
SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;
@ -245,8 +245,8 @@ SELECT typname, usename FROM pg_type, pg_user where typname = 'te6' and typowner
SELECT run_command_on_workers($$SELECT row(typname, usename) FROM pg_type, pg_user where typname = 'te6' and typowner = usesysid;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"(te6,typeuser)")
(localhost,57638,t,"(te6,typeuser)")
(<host>,xxxxx,t,"(te6,typeuser)")
(<host>,xxxxx,t,"(te6,typeuser)")
(2 rows)
-- deleting the enum cascade will remove the type from the table and the workers
@ -264,8 +264,8 @@ SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5')
SELECT run_command_on_workers($$SELECT typname FROM pg_type, pg_user where typname IN ('te3','tc3','tc4','tc5') and typowner = usesysid ORDER BY typname;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
(2 rows)
-- make sure attribute names are quoted correctly, no errors indicates types are propagated correctly
@ -338,8 +338,8 @@ SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'f
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- verify they are still distributed when required
@ -359,8 +359,8 @@ SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'f
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname IN ('feature_flag_composite_type', 'feature_flag_enum_type');$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
(2 rows)
RESET citus.enable_create_type_propagation;
@ -370,23 +370,23 @@ DROP SCHEMA type_tests CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP SCHEMA type_tests2 CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(2 rows)
DROP USER typeuser;
SELECT run_command_on_workers($$DROP USER typeuser;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(2 rows)

@ -3,8 +3,8 @@ CREATE SCHEMA type_conflict;
SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE SCHEMA")
(localhost,57638,t,"CREATE SCHEMA")
(<host>,xxxxx,t,"CREATE SCHEMA")
(<host>,xxxxx,t,"CREATE SCHEMA")
(2 rows)
-- create a type on a worker that should not cause data loss once overwritten with a type

@ -40,8 +40,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"yes,no")
(localhost,57638,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(2 rows)
BEGIN;
@ -57,8 +57,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"yes,no,maybe")
(localhost,57638,t,"yes,no,maybe")
(<host>,xxxxx,t,"yes,no,maybe")
(<host>,xxxxx,t,"yes,no,maybe")
(2 rows)
-- clear objects
@ -67,7 +67,7 @@ DROP SCHEMA xact_enum_type CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(2 rows)

@ -41,8 +41,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"yes,no")
(localhost,57638,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(2 rows)
BEGIN;
@ -59,8 +59,8 @@ SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE
SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumsortorder ASC) FROM pg_enum WHERE enumtypid = 'xact_enum_type.xact_enum_edit'::regtype;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"yes,no")
(localhost,57638,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(<host>,xxxxx,t,"yes,no")
(2 rows)
-- clear objects
@ -69,7 +69,7 @@ DROP SCHEMA xact_enum_type CASCADE;
SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP SCHEMA")
(localhost,57638,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(2 rows)

@ -16,8 +16,8 @@ WHERE name = 'uuid-ossp'
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
(2 rows)
SET client_min_messages TO WARNING;
@ -27,12 +27,12 @@ RESET client_min_messages;
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- show that extension recreation on new nodes works also fine with extension names that require escaping
SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT 1 from master_remove_node('<host>', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@ -49,7 +49,7 @@ WHERE name = 'uuid-ossp'
\gset
:uuid_present_command;
-- and add the other node
SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT 1 from master_add_node('<host>', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@ -59,8 +59,8 @@ SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
(2 rows)
SET client_min_messages TO WARNING;

@ -21,8 +21,8 @@ WHERE name = 'uuid-ossp'
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
(2 rows)
SET client_min_messages TO WARNING;
@ -33,12 +33,12 @@ RESET client_min_messages;
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- show that extension recreation on new nodes works also fine with extension names that require escaping
SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT 1 from master_remove_node('<host>', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@ -60,7 +60,7 @@ WHERE name = 'uuid-ossp'
(1 row)
-- and add the other node
SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT 1 from master_add_node('<host>', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@ -70,8 +70,8 @@ SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
(2 rows)
SET client_min_messages TO WARNING;

@ -35,10 +35,10 @@ SELECT citus.dump_network_traffic();
dump_network_traffic
---------------------------------------------------------------------
(0,coordinator,"[initial message]")
(0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=<user>)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=<user>)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'CREATE TABLE public.copy_test (key integer, value integer)'))""]")
(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=worker_apply_shard_ddl_command,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'ALTER TABLE public.copy_test OWNER TO postgres'))""]")
(0,coordinator,"[""Query(query=SELECT worker_apply_shard_ddl_command (100400, 'ALTER TABLE public.copy_test OWNER TO <user>'))""]")
(0,worker,"[""RowDescription(fieldcount=1,fields=['F(name=worker_apply_shard_ddl_command,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, XX, 'XXXX-XX-XX XX:XX:XX.XXXXXX-XX');)""]")
(0,worker,"['CommandComplete(command=BEGIN)', ""RowDescription(fieldcount=1,fields=['F(name=assign_distributed_transaction_id,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=in_transaction_block)']")
@ -74,14 +74,14 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@ -99,15 +99,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
ERROR: failure on connection marked as essential: localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@ -127,14 +127,14 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@ -151,14 +151,14 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
(2 rows)
SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
@ -168,7 +168,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT|COPY").kill()');
(1 row)
SELECT count(1) FROM copy_test;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -191,8 +191,8 @@ SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@ -208,17 +208,17 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
WARNING: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
ERROR: failure on connection marked as essential: localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@ -238,17 +238,17 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
WARNING: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
ERROR: failure on connection marked as essential: localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
(2 rows)
SELECT count(1) FROM copy_test;
@ -266,19 +266,19 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT * FROM pg_dist_shard s, pg_dist_shard_placement p
WHERE (s.shardid = p.shardid) AND s.logicalrelid = 'copy_test'::regclass
ORDER BY placementid;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue | shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 57637 | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | localhost | 9060 | 101
copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | localhost | 57637 | 112
copy_test | 100408 | t | 0 | 3 | 100408 | 3 | 8192 | localhost | 9060 | 113
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | xxxxx | 100
copy_test | 100400 | t | 0 | 3 | 100400 | 1 | 8192 | <host> | 9060 | 101
copy_test | 100408 | t | 0 | 3 | 100408 | 1 | 8192 | <host> | xxxxx | 112
copy_test | 100408 | t | 0 | 3 | 100408 | 3 | 8192 | <host> | 9060 | 113
(4 rows)
SELECT count(1) FROM copy_test;

@ -36,7 +36,7 @@ SELECT citus.dump_network_traffic();
dump_network_traffic
---------------------------------------------------------------------
(0,coordinator,"[initial message]")
(0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(0,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=<user>)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=<user>)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(0,coordinator,"[""Query(query=BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, XX, 'XXXX-XX-XX XX:XX:XX.XXXXXX-XX');)""]")
(0,worker,"['CommandComplete(command=BEGIN)', ""RowDescription(fieldcount=1,fields=['F(name=assign_distributed_transaction_id,tableoid=0,colattrnum=0,typoid=2278,typlen=4,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=in_transaction_block)']")
(0,coordinator,"['Query(query=COPY public.copy_test_XXXXXX (key, value) FROM STDIN WITH (FORMAT BINARY))']")
@ -46,7 +46,7 @@ SELECT citus.dump_network_traffic();
(0,coordinator,"['Query(query=COMMIT)']")
(0,worker,"['CommandComplete(command=COMMIT)', 'ReadyForQuery(state=idle)']")
(1,coordinator,"[initial message]")
(1,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=postgres)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=postgres)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(1,worker,"['AuthenticationOk()', 'ParameterStatus(application_name=citus)', 'ParameterStatus(client_encoding=UTF8)', 'ParameterStatus(DateStyle=ISO, MDY)', 'ParameterStatus(integer_datetimes=on)', 'ParameterStatus(IntervalStyle=<user>)', 'ParameterStatus(is_superuser=on)', 'ParameterStatus(server_encoding=UTF8)', 'ParameterStatus(server_version=XXX)', 'ParameterStatus(session_authorization=<user>)', 'ParameterStatus(standard_conforming_strings=on)', 'ParameterStatus(TimeZone=XXX)', 'BackendKeyData(XXX)', 'ReadyForQuery(state=idle)']")
(1,coordinator,"['Query(query=SELECT count(1) AS count FROM public.copy_test_XXXXXX copy_test)']")
(1,worker,"[""RowDescription(fieldcount=1,fields=['F(name=count,tableoid=0,colattrnum=0,typoid=20,typlen=8,typmod=-1,format_code=0)'])"", 'DataRow(columncount=1,columns=[""C(length=0,value=b\\'\\')""])', 'CommandComplete(command=SELECT 1)', 'ReadyForQuery(state=idle)']")
(14 rows)
@ -61,9 +61,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction").kil
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COPY copy_test, line 1: "0, 0"
ERROR: failure on connection marked as essential: localhost:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
CONTEXT: COPY copy_test, line 1: "0, 0"
-- ==== kill the connection when we try to start the COPY ====
-- the query should abort
@ -77,7 +77,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COPY copy_test, line 1: "0, 0"
-- ==== kill the connection when we first start sending data ====
-- the query should abort
@ -88,7 +88,7 @@ SELECT citus.mitmproxy('conn.onCopyData().killall()'); -- raw rows from the clie
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
-- ==== kill the connection when the worker confirms it's received the data ====
-- the query should abort
SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()');
@ -98,7 +98,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").killall()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
-- ==== kill the connection when we try to send COMMIT ====
-- the query should succeed, and the placement should be marked inactive
SELECT citus.mitmproxy('conn.allow()');
@ -129,10 +129,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()');
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- the shard is marked invalid
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -196,8 +196,8 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
) ORDER BY nodeport, placementid;
shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
100400 | 1 | 0 | localhost | 9060 | 100
100400 | 3 | 0 | localhost | 57637 | 101
100400 | 1 | 0 | <host> | 9060 | 100
100400 | 3 | 0 | <host> | xxxxx | 101
(2 rows)
-- ==== okay, run some tests where there's only one active shard ====
@ -219,7 +219,7 @@ SELECT citus.mitmproxy('conn.killall()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -250,9 +250,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="assign_distributed_transaction_id").
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COPY copy_test, line 1: "0, 0"
ERROR: failure on connection marked as essential: localhost:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
CONTEXT: COPY copy_test, line 1: "0, 0"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -280,7 +280,7 @@ COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' W
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COPY copy_test, line 1: "0, 0"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -305,7 +305,7 @@ SELECT citus.mitmproxy('conn.onCopyData().killall()');
(1 row)
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -330,10 +330,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT$").killall()');
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
ERROR: could not commit transaction on any active node
SELECT citus.mitmproxy('conn.allow()');
@ -357,8 +357,8 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
) ORDER BY nodeport, placementid;
shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
100400 | 1 | 0 | localhost | 9060 | 100
100400 | 3 | 0 | localhost | 57637 | 101
100400 | 1 | 0 | <host> | 9060 | 100
100400 | 3 | 0 | <host> | xxxxx | 101
(2 rows)
-- the COMMIT makes it through but the connection dies before we get a response
@ -370,10 +370,10 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COMMIT").killall()');
COPY copy_test FROM PROGRAM 'echo 0, 0 && echo 1, 1 && echo 2, 4 && echo 3, 9' WITH CSV;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
ERROR: could not commit transaction on any active node
SELECT citus.mitmproxy('conn.allow()');
@ -387,8 +387,8 @@ SELECT * FROM pg_dist_shard_placement WHERE shardid IN (
) ORDER BY nodeport, placementid;
shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
100400 | 1 | 0 | localhost | 9060 | 100
100400 | 3 | 0 | localhost | 57637 | 101
100400 | 1 | 0 | <host> | 9060 | 100
100400 | 3 | 0 | <host> | xxxxx | 101
(2 rows)
SELECT * FROM copy_test;

@ -16,8 +16,8 @@ SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 9060
localhost | 57637
<host> | 9060
<host> | xxxxx
(2 rows)
-- verify there are no tables that could prevent add/remove node operations
@ -52,8 +52,8 @@ ORDER BY placementid;
200000 | 1
(2 rows)
SELECT master_disable_node('localhost', :worker_2_proxy_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back.
SELECT master_disable_node('<host>', :worker_2_proxy_port);
NOTICE: Node <host>:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('<host>', 9060) to activate this node back.
master_disable_node
---------------------------------------------------------------------
@ -63,7 +63,7 @@ SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 57637
<host> | xxxxx
(1 row)
SELECT shardid, shardstate
@ -82,12 +82,12 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
(1 row)
SELECT master_activate_node('localhost', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
SELECT master_activate_node('<host>', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node <host>:xxxxx
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -99,7 +99,7 @@ SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 57637
<host> | xxxxx
(1 row)
SELECT shardid, shardstate
@ -118,17 +118,17 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA").kill()');
(1 row)
SELECT master_activate_node('localhost', :worker_2_proxy_port);
SELECT master_activate_node('<host>', :worker_2_proxy_port);
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- verify node is not activated
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 57637
<host> | xxxxx
(1 row)
SELECT shardid, shardstate
@ -147,15 +147,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backen
(1 row)
SELECT master_activate_node('localhost', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
SELECT master_activate_node('<host>', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node <host>:xxxxx
ERROR: canceling statement due to user request
-- verify node is not activated
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 57637
<host> | xxxxx
(1 row)
SELECT shardid, shardstate
@ -174,11 +174,11 @@ SELECT citus.mitmproxy('conn.allow()');
(1 row)
-- master_remove_node fails when there are shards on that worker
SELECT master_remove_node('localhost', :worker_2_proxy_port);
SELECT master_remove_node('<host>', :worker_2_proxy_port);
ERROR: you cannot remove the primary node of a node group which has shard placements
-- drop event table and re-run remove
DROP TABLE event_table;
SELECT master_remove_node('localhost', :worker_2_proxy_port);
SELECT master_remove_node('<host>', :worker_2_proxy_port);
master_remove_node
---------------------------------------------------------------------
@ -189,7 +189,7 @@ SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 57637
<host> | xxxxx
(1 row)
SELECT shardid, shardstate
@ -204,13 +204,13 @@ ORDER BY placementid;
-- test master_add_inactive_node
-- it does not create any network activity therefore can not
-- be injected failure through network
SELECT master_add_inactive_node('localhost', :worker_2_proxy_port);
SELECT master_add_inactive_node('<host>', :worker_2_proxy_port);
master_add_inactive_node
---------------------------------------------------------------------
3
(1 row)
SELECT master_remove_node('localhost', :worker_2_proxy_port);
SELECT master_remove_node('<host>', :worker_2_proxy_port);
master_remove_node
---------------------------------------------------------------------
@ -233,18 +233,18 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
(1 row)
SELECT master_add_node('localhost', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
SELECT master_add_node('<host>', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node <host>:xxxxx
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- verify node is not added
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 57637
<host> | xxxxx
(1 row)
SELECT shardid, shardstate
@ -262,15 +262,15 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backen
(1 row)
SELECT master_add_node('localhost', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
SELECT master_add_node('<host>', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node <host>:xxxxx
ERROR: canceling statement due to user request
-- verify node is not added
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 57637
<host> | xxxxx
(1 row)
SELECT shardid, shardstate
@ -289,8 +289,8 @@ SELECT citus.mitmproxy('conn.allow()');
(1 row)
SELECT master_add_node('localhost', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
SELECT master_add_node('<host>', :worker_2_proxy_port);
NOTICE: Replicating reference table "user_table" to the node <host>:xxxxx
master_add_node
---------------------------------------------------------------------
6
@ -301,8 +301,8 @@ SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 9060
localhost | 57637
<host> | 9060
<host> | xxxxx
(2 rows)
SELECT shardid, shardstate
@ -316,7 +316,7 @@ ORDER BY placementid;
(2 rows)
-- fail master_add_node by failing copy out operation
SELECT master_remove_node('localhost', :worker_1_port);
SELECT master_remove_node('<host>', :worker_1_port);
master_remove_node
---------------------------------------------------------------------
@ -328,16 +328,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()');
(1 row)
SELECT master_add_node('localhost', :worker_1_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
ERROR: could not copy table "user_table_200000" from "localhost:xxxxx"
CONTEXT: while executing command on localhost:xxxxx
SELECT master_add_node('<host>', :worker_1_port);
NOTICE: Replicating reference table "user_table" to the node <host>:xxxxx
ERROR: could not copy table "user_table_200000" from "<host>:xxxxx"
CONTEXT: while executing command on <host>:xxxxx
-- verify node is not added
SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 9060
<host> | 9060
(1 row)
SELECT citus.mitmproxy('conn.allow()');
@ -346,8 +346,8 @@ SELECT citus.mitmproxy('conn.allow()');
(1 row)
SELECT master_add_node('localhost', :worker_1_port);
NOTICE: Replicating reference table "user_table" to the node localhost:xxxxx
SELECT master_add_node('<host>', :worker_1_port);
NOTICE: Replicating reference table "user_table" to the node <host>:xxxxx
master_add_node
---------------------------------------------------------------------
8
@ -358,8 +358,8 @@ SELECT * FROM master_get_active_worker_nodes()
ORDER BY 1, 2;
node_name | node_port
---------------------------------------------------------------------
localhost | 9060
localhost | 57637
<host> | 9060
<host> | xxxxx
(2 rows)
SELECT shardid, shardstate
@ -379,7 +379,7 @@ SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASC
ORDER BY nodeport;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 9060 | t | DROP SCHEMA
localhost | 57637 | t | DROP SCHEMA
<host> | 9060 | t | DROP SCHEMA
<host> | xxxxx | t | DROP SCHEMA
(2 rows)

@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.delay(500)');
(1 row)
ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no);
ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms
ERROR: could not establish any connections to the node <host>:xxxxx after 400 ms
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -169,7 +169,7 @@ SELECT citus.mitmproxy('conn.delay(500)');
(1 row)
SELECT count(*) FROM single_replicatated;
ERROR: could not establish any connections to the node localhost:xxxxx after 400 ms
ERROR: could not establish any connections to the node <host>:xxxxx after 400 ms
SET citus.force_max_query_parallelization TO OFF;
-- one similar test, but this time on modification queries
-- to see that connection establishement failures could
@ -224,7 +224,7 @@ RESET client_min_messages;
-- verify get_global_active_transactions works when a timeout happens on a connection
SELECT get_global_active_transactions();
WARNING: could not establish connection after 400 ms
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
get_global_active_transactions
---------------------------------------------------------------------
(0 rows)

@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.kill()');
(1 row)
\COPY test_table FROM stdin delimiter ',';
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -69,7 +69,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()');
(1 row)
\COPY test_table FROM stdin delimiter ',';
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -124,7 +124,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 1").kill()');
(1 row)
\COPY test_table FROM stdin delimiter ',';
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -179,7 +179,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
\COPY test_table FROM stdin delimiter ',';
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -239,7 +239,7 @@ BEGIN;
\COPY test_table FROM stdin delimiter ',';
ROLLBACK;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -276,22 +276,22 @@ SELECT citus.mitmproxy('conn.kill()');
(1 row)
\COPY test_table_2 FROM stdin delimiter ',';
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: COPY test_table_2, line 1: "1,2"
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: COPY test_table_2, line 2: "3,4"
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: COPY test_table_2, line 3: "6,7"
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -341,7 +341,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COPY test_table_2, line 1: "1,2"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -386,7 +386,7 @@ SELECT citus.mitmproxy('conn.onCopyData().kill()');
(1 row)
\COPY test_table_2 FROM stdin delimiter ',';
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------

@ -35,7 +35,7 @@ SELECT citus.mitmproxy('conn.kill()');
(1 row)
\copy test_table FROM STDIN DELIMITER ','
ERROR: failure on connection marked as essential: localhost:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
CONTEXT: COPY test_table, line 1: "1,2"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
\copy test_table FROM STDIN DELIMITER ','
ERROR: failure on connection marked as essential: localhost:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
CONTEXT: COPY test_table, line 1: "1,2"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -122,7 +122,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^COPY").kill()');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COPY test_table, line 1: "1,2"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -178,7 +178,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COPY 3").kill()');
(1 row)
\copy test_table FROM STDIN DELIMITER ','
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -236,7 +236,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="FROM STDIN WITH").killall()');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COPY test_table, line 1: "1,2"
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -265,7 +265,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()');
\copy test_table FROM STDIN DELIMITER ','
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -396,7 +396,7 @@ SET LOCAL client_min_messages TO WARNING;
\copy test_table FROM STDIN DELIMITER ','
ROLLBACK;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -429,7 +429,7 @@ SET LOCAL client_min_messages TO WARNING;
\copy test_table FROM STDIN DELIMITER ','
ROLLBACK;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -26,7 +26,7 @@ SELECT citus.mitmproxy('conn.kill()');
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -62,7 +62,7 @@ SELECT create_distributed_table('test_table', 'id');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -72,8 +72,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,1)
(<host>,9060,t,0)
(<host>,xxxxx,t,1)
(2 rows)
-- cancel as soon as the coordinator sends CREATE SCHEMA
@ -97,15 +97,15 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,1)
(<host>,9060,t,0)
(<host>,xxxxx,t,1)
(2 rows)
SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,"DROP SCHEMA")
(localhost,57637,t,"DROP SCHEMA")
(<host>,9060,t,"DROP SCHEMA")
(<host>,xxxxx,t,"DROP SCHEMA")
(2 rows)
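Every block in these expected files follows the same fault-injection shape; a minimal sketch of it (the matched query string, table name, and verification query vary per test, and the proxied worker listens on the mitmproxy port):

-- ask the mitmproxy in front of one worker to kill the connection
-- as soon as it sees the matching statement
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
-- the command under test now fails against the proxied worker
SELECT create_distributed_table('test_table', 'id');
-- restore normal traffic, then verify that nothing leaked onto the workers
SELECT citus.mitmproxy('conn.allow()');
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables
  WHERE table_schema = 'create_distributed_table_non_empty_failure' AND table_name LIKE 'test_table%'$$);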
-- this triggers a schema creation which prevents further transactions around dependency propagation
@ -125,7 +125,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -144,8 +144,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,1)
(localhost,57637,t,1)
(<host>,9060,t,1)
(<host>,xxxxx,t,1)
(2 rows)
-- cancel as soon as the coordinator sends begin
@ -175,8 +175,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,1)
(localhost,57637,t,1)
(<host>,9060,t,1)
(<host>,xxxxx,t,1)
(2 rows)
DROP TABLE test_table ;
@ -190,7 +190,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -211,7 +211,7 @@ SELECT create_distributed_table('test_table', 'id');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -227,7 +227,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()');
SELECT create_distributed_table('test_table', 'id');
NOTICE: Copying data from local table...
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -278,7 +278,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
SELECT create_distributed_table('test_table', 'id');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -467,7 +467,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -480,8 +480,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Now, cancel the connection just after the COPY started to
@ -514,7 +514,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -524,8 +524,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Now, cancel the connection when we issue CREATE TABLE on
@ -555,7 +555,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_comma
(1 row)
SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -568,8 +568,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Now run the same tests with 1pc
@ -593,7 +593,7 @@ SELECT citus.mitmproxy('conn.kill()');
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -612,8 +612,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- in the first test, cancel the first connection we sent from the coordinator
@ -640,8 +640,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
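The cancellation variants use the same proxy hooks but send a cancel to the coordinator backend instead of killing the worker connection; a rough sketch, assuming the same test table as above:

-- cancel our own backend as soon as the proxied worker receives CREATE TABLE
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')');
SELECT create_distributed_table('test_table', 'id');  -- errors out as a cancelled statement
SELECT citus.mitmproxy('conn.allow()');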
-- this triggers a schema creation which prevents further transactions around dependency propagation
@ -661,7 +661,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -680,8 +680,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,1)
(localhost,57637,t,1)
(<host>,9060,t,1)
(<host>,xxxxx,t,1)
(2 rows)
-- cancel as soon as the coordinator sends begin
@ -711,8 +711,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,1)
(localhost,57637,t,1)
(<host>,9060,t,1)
(<host>,xxxxx,t,1)
(2 rows)
DROP TABLE test_table ;
@ -726,7 +726,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -747,7 +747,7 @@ SELECT create_distributed_table('test_table', 'id');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -762,7 +762,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY").kill()');
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -940,7 +940,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -980,7 +980,7 @@ SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass;
count
---------------------------------------------------------------------
@ -990,8 +990,8 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'create_distributed_table_non_empty_failure' and table_name LIKE 'test_table%'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
SELECT citus.mitmproxy('conn.allow()');
@ -40,7 +40,7 @@ SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE ind
WHERE nodeport = :worker_2_proxy_port;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 9060 | t | 0
<host> | 9060 | t | 0
(1 row)
DROP TABLE index_test;
@ -151,7 +151,7 @@ SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE ind
WHERE nodeport = :worker_2_proxy_port;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 9060 | t | 4
<host> | 9060 | t | 4
(1 row)
RESET SEARCH_PATH;
@ -162,6 +162,6 @@ SELECT * FROM run_command_on_workers($$SELECT count(*) FROM pg_indexes WHERE ind
WHERE nodeport = :worker_2_proxy_port;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 9060 | t | 0
<host> | 9060 | t | 0
(1 row)
@ -25,7 +25,7 @@ SELECT citus.mitmproxy('conn.onQuery().kill()');
(1 row)
SELECT create_reference_table('ref_table');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="BEGIN").kill()');
(1 row)
SELECT create_reference_table('ref_table');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -76,7 +76,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="SELECT 1").kill()');
(1 row)
SELECT create_reference_table('ref_table');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -110,7 +110,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="COPY 3").kill()');
SELECT create_reference_table('ref_table');
NOTICE: Copying data from local table...
ERROR: failed to COPY to shard xxxxx on localhost:xxxxx
ERROR: failed to COPY to shard xxxxx on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard_placement;
count
---------------------------------------------------------------------
@ -145,7 +145,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
SELECT create_reference_table('ref_table');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM pg_dist_shard_placement;
count
---------------------------------------------------------------------
@ -175,7 +175,7 @@ SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shard
shardid | nodeport | shardstate
---------------------------------------------------------------------
10000008 | 9060 | 1
10000008 | 57637 | 1
10000008 | xxxxx | 1
(2 rows)
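Reference-table failures are checked against the placement catalog rather than information_schema; a condensed sketch of the verification used here (shardstate 1 marks a healthy placement):

-- nothing should remain after a failed create_reference_table,
-- and a successful retry should leave only healthy placements
SELECT count(*) FROM pg_dist_shard_placement;
SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement ORDER BY shardid, nodeport;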
SET client_min_messages TO NOTICE;
@ -202,8 +202,8 @@ SELECT create_reference_table('ref_table');
WARNING: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
ERROR: failure on connection marked as essential: localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
COMMIT;
-- kill on ROLLBACK, should be rollbacked
SELECT citus.mitmproxy('conn.onQuery(query="^ROLLBACK").kill()');
@ -222,7 +222,7 @@ NOTICE: Copying data from local table...
ROLLBACK;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodeport;
shardid | shardstate | shardlength | nodename | nodeport | placementid
---------------------------------------------------------------------
@ -20,7 +20,7 @@ SELECT citus.mitmproxy('conn.kill()');
(1 row)
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -39,8 +39,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- kill as soon as the coordinator sends CREATE SCHEMA
@ -57,7 +57,7 @@ SELECT create_distributed_table('test_table', 'id');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -73,8 +73,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'failure_create_table'$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,1)
(<host>,9060,t,0)
(<host>,xxxxx,t,1)
(2 rows)
-- this is merely used to get the schema creation propagated. Without there are failures
@ -89,7 +89,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -108,8 +108,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Now, kill the connection after sending create table command with worker_apply_shard_ddl_command UDF
@ -120,7 +120,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_comman
(1 row)
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -139,8 +139,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Kill the connection while creating a distributed table in sequential mode on sending create command
@ -154,7 +154,7 @@ BEGIN;
(1 row)
SELECT create_distributed_table('test_table', 'id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -174,8 +174,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Now, cancel the connection while creating transaction
@ -204,8 +204,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
DROP TABLE test_table;
@ -225,7 +225,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
(1 row)
SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -244,8 +244,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").cancel(' || pg_backend_pid() || ')');
@ -271,8 +271,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Kill and cancel the connection after worker sends "PREPARE TRANSACTION" ack with colocate_with option
@ -284,7 +284,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table');
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -300,8 +300,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
@ -327,8 +327,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
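Killing the connection after a worker has already acknowledged PREPARE TRANSACTION can leave a prepared transaction behind on that worker; the suite clears such leftovers explicitly, roughly as follows (recover_prepared_transactions() is the Citus UDF used for this later in these files):

SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").kill()');
SELECT create_distributed_table('test_table', 'id', colocate_with => 'temp_table');  -- ERROR: connection not open
SELECT citus.mitmproxy('conn.allow()');
-- resolve any prepared transactions the aborted 2PC left on the workers
SELECT recover_prepared_transactions();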
-- drop tables and schema and recreate to start from a non-distributed schema again
@ -347,7 +347,7 @@ SELECT citus.mitmproxy('conn.kill()');
BEGIN;
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -367,8 +367,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- this is merely used to get the schema creation propagated. Without there are failures
@ -384,7 +384,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
BEGIN;
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -404,8 +404,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Now, cancel the connection while creating the transaction on
@ -443,8 +443,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- drop tables and schema and recreate to start from a non-distributed schema again
@ -463,7 +463,7 @@ SELECT citus.mitmproxy('conn.kill()');
BEGIN;
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -483,8 +483,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Kill connection while sending create table command with 1pc.
@ -496,7 +496,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()');
BEGIN;
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -516,8 +516,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- this is merely used to get the schema creation propagated. Without there are failures
@ -533,7 +533,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
BEGIN;
SELECT create_distributed_table('test_table','id');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -553,8 +553,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Now, cancel the connection while creating transactions on
@ -591,8 +591,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
DROP TABLE test_table;
@ -615,7 +615,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
SELECT master_create_worker_shards('test_table_2', 4, 2);
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -640,8 +640,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Kill the connection after worker sends "PREPARE TRANSACTION" ack
@ -653,7 +653,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k
SELECT master_create_worker_shards('test_table_2', 4, 2);
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -669,8 +669,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
-- Cancel the connection after sending prepare transaction in master_create_worker_shards
@ -704,8 +704,8 @@ SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(<host>,9060,t,0)
(<host>,xxxxx,t,0)
(2 rows)
DROP SCHEMA failure_create_table CASCADE;
@ -57,7 +57,7 @@ FROM
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- kill at the second copy (pull)
SELECT citus.mitmproxy('conn.onQuery(query="SELECT user_id FROM cte_failure.events_table_16000002").kill()');
mitmproxy
@ -88,7 +88,7 @@ FROM
ORDER BY 1 DESC LIMIT 5
) as foo
WHERE foo.user_id = cte.user_id;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -122,7 +122,7 @@ FROM
ORDER BY 1 DESC LIMIT 5
) as foo
WHERE foo.user_id = cte.user_id;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -262,7 +262,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()');
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -295,7 +295,7 @@ INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- verify contents are the same
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@ -378,7 +378,7 @@ BEGIN;
SET LOCAL citus.multi_shard_modify_mode = 'sequential';
WITH cte_delete as (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *)
INSERT INTO users_table SELECT * FROM cte_delete;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -37,7 +37,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -71,7 +71,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -104,7 +104,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -159,10 +159,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,value}")
(localhost,9060,100802,t,"{key,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,value}")
(<host>,9060,100802,t,"{key,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- manually drop & re-create the table for the next tests
@ -203,10 +203,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- the following tests rely the column not exists, so drop manually
@ -223,15 +223,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()');
ALTER TABLE test_table ADD COLUMN new_column INT;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
WARNING: could not commit transaction for shard xxxxx on any active node
SELECT citus.mitmproxy('conn.allow()');
@ -250,10 +250,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- now cancel just after the worker sends response to
@ -289,9 +289,9 @@ SET LOCAL client_min_messages TO WARNING;
ALTER TABLE test_table DROP COLUMN new_column;
ROLLBACK;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- now cancel just after the worker sends response to
-- but Postgres doesn't accepts interrupts during COMMIT and ROLLBACK
-- so should not cancel at all, so not an effective test but adding in
@ -332,10 +332,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- now, lets test with 2PC
@ -349,7 +349,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
(1 row)
ALTER TABLE test_table DROP COLUMN new_column;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -383,7 +383,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
ALTER TABLE test_table DROP COLUMN new_column;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -416,7 +416,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil
(1 row)
ALTER TABLE test_table DROP COLUMN new_column;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -450,7 +450,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
ALTER TABLE test_table DROP COLUMN new_column;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -466,10 +466,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- we should be able to recover the transaction and
@ -483,10 +483,10 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- cancelling on PREPARE should be fine, everything should be rollbacked
@ -513,10 +513,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- we should be able to recover the transaction and
@ -530,10 +530,10 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- killing on command complete of COMMIT PREPARE, we should see that the command succeeds
@ -560,10 +560,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,value}")
(localhost,9060,100802,t,"{key,value}")
(localhost,57637,100801,t,"{key,value}")
(localhost,57637,100803,t,"{key,value}")
(<host>,9060,100800,t,"{key,value}")
(<host>,9060,100802,t,"{key,value}")
(<host>,xxxxx,100801,t,"{key,value}")
(<host>,xxxxx,100803,t,"{key,value}")
(4 rows)
-- we shouldn't have any prepared transactions in the workers
@ -576,10 +576,10 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,value}")
(localhost,9060,100802,t,"{key,value}")
(localhost,57637,100801,t,"{key,value}")
(localhost,57637,100803,t,"{key,value}")
(<host>,9060,100800,t,"{key,value}")
(<host>,9060,100802,t,"{key,value}")
(<host>,xxxxx,100801,t,"{key,value}")
(<host>,xxxxx,100803,t,"{key,value}")
(4 rows)
-- kill as soon as the coordinator sends COMMIT
@ -607,10 +607,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,value}")
(localhost,9060,100802,t,"{key,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,value}")
(<host>,9060,100802,t,"{key,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
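The divergent column lists above are how a partial DDL failure shows up: run_command_on_placements compares every shard placement, and the placements behind the proxy keep the old schema until the transaction is recovered. The check itself, condensed (public.table_attrs is a helper view created elsewhere in the regression suite):

-- list the columns of every placement of test_table; placements on the proxied
-- worker may still miss new_column after a failure that hit only that node
SELECT run_command_on_placements('test_table',
  $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs WHERE relid = '%s'::regclass;$$)
ORDER BY 1;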
-- we should be able to recover the transaction and
@ -624,10 +624,10 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- finally, test failing on ROLLBACK with 2PC
@ -658,10 +658,10 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- but now kill just after the worker sends response to
@ -692,10 +692,10 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,new_column,value}")
(localhost,9060,100802,t,"{key,new_column,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(<host>,9060,100800,t,"{key,new_column,value}")
(<host>,9060,100802,t,"{key,new_column,value}")
(<host>,xxxxx,100801,t,"{key,new_column,value}")
(<host>,xxxxx,100803,t,"{key,new_column,value}")
(4 rows)
-- another set of tests with 2PC and replication factor = 2
@ -720,7 +720,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -754,7 +754,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -787,7 +787,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@ -821,7 +821,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@ -842,14 +842,14 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100804,t,"{key,value}")
(localhost,9060,100805,t,"{key,value}")
(localhost,9060,100806,t,"{key,value}")
(localhost,9060,100807,t,"{key,value}")
(localhost,57637,100804,t,"{key,value}")
(localhost,57637,100805,t,"{key,value}")
(localhost,57637,100806,t,"{key,value}")
(localhost,57637,100807,t,"{key,value}")
(<host>,9060,100804,t,"{key,value}")
(<host>,9060,100805,t,"{key,value}")
(<host>,9060,100806,t,"{key,value}")
(<host>,9060,100807,t,"{key,value}")
(<host>,xxxxx,100804,t,"{key,value}")
(<host>,xxxxx,100805,t,"{key,value}")
(<host>,xxxxx,100806,t,"{key,value}")
(<host>,xxxxx,100807,t,"{key,value}")
(8 rows)
-- killing on command complete of COMMIT PREPARE, we should see that the command succeeds
@ -876,14 +876,14 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100804,t,"{key,new_column,value}")
(localhost,9060,100805,t,"{key,new_column,value}")
(localhost,9060,100806,t,"{key,new_column,value}")
(localhost,9060,100807,t,"{key,new_column,value}")
(localhost,57637,100804,t,"{key,new_column,value}")
(localhost,57637,100805,t,"{key,new_column,value}")
(localhost,57637,100806,t,"{key,new_column,value}")
(localhost,57637,100807,t,"{key,new_column,value}")
(<host>,9060,100804,t,"{key,new_column,value}")
(<host>,9060,100805,t,"{key,new_column,value}")
(<host>,9060,100806,t,"{key,new_column,value}")
(<host>,9060,100807,t,"{key,new_column,value}")
(<host>,xxxxx,100804,t,"{key,new_column,value}")
(<host>,xxxxx,100805,t,"{key,new_column,value}")
(<host>,xxxxx,100806,t,"{key,new_column,value}")
(<host>,xxxxx,100807,t,"{key,new_column,value}")
(8 rows)
-- we shouldn't have any prepared transactions in the workers
@ -896,14 +896,14 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100804,t,"{key,new_column,value}")
(localhost,9060,100805,t,"{key,new_column,value}")
(localhost,9060,100806,t,"{key,new_column,value}")
(localhost,9060,100807,t,"{key,new_column,value}")
(localhost,57637,100804,t,"{key,new_column,value}")
(localhost,57637,100805,t,"{key,new_column,value}")
(localhost,57637,100806,t,"{key,new_column,value}")
(localhost,57637,100807,t,"{key,new_column,value}")
(<host>,9060,100804,t,"{key,new_column,value}")
(<host>,9060,100805,t,"{key,new_column,value}")
(<host>,9060,100806,t,"{key,new_column,value}")
(<host>,9060,100807,t,"{key,new_column,value}")
(<host>,xxxxx,100804,t,"{key,new_column,value}")
(<host>,xxxxx,100805,t,"{key,new_column,value}")
(<host>,xxxxx,100806,t,"{key,new_column,value}")
(<host>,xxxxx,100807,t,"{key,new_column,value}")
(8 rows)
-- kill as soon as the coordinator sends COMMIT
@ -931,14 +931,14 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100804,t,"{key,new_column,value}")
(localhost,9060,100805,t,"{key,new_column,value}")
(localhost,9060,100806,t,"{key,new_column,value}")
(localhost,9060,100807,t,"{key,new_column,value}")
(localhost,57637,100804,t,"{key,value}")
(localhost,57637,100805,t,"{key,value}")
(localhost,57637,100806,t,"{key,value}")
(localhost,57637,100807,t,"{key,value}")
(<host>,9060,100804,t,"{key,new_column,value}")
(<host>,9060,100805,t,"{key,new_column,value}")
(<host>,9060,100806,t,"{key,new_column,value}")
(<host>,9060,100807,t,"{key,new_column,value}")
(<host>,xxxxx,100804,t,"{key,value}")
(<host>,xxxxx,100805,t,"{key,value}")
(<host>,xxxxx,100806,t,"{key,value}")
(<host>,xxxxx,100807,t,"{key,value}")
(8 rows)
-- we should be able to recover the transaction and
@@ -952,14 +952,14 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100804,t,"{key,value}")
(localhost,9060,100805,t,"{key,value}")
(localhost,9060,100806,t,"{key,value}")
(localhost,9060,100807,t,"{key,value}")
(localhost,57637,100804,t,"{key,value}")
(localhost,57637,100805,t,"{key,value}")
(localhost,57637,100806,t,"{key,value}")
(localhost,57637,100807,t,"{key,value}")
(<host>,9060,100804,t,"{key,value}")
(<host>,9060,100805,t,"{key,value}")
(<host>,9060,100806,t,"{key,value}")
(<host>,9060,100807,t,"{key,value}")
(<host>,xxxxx,100804,t,"{key,value}")
(<host>,xxxxx,100805,t,"{key,value}")
(<host>,xxxxx,100806,t,"{key,value}")
(<host>,xxxxx,100807,t,"{key,value}")
(8 rows)
-- finally, test failing on ROLLBACK with 2PC
@@ -990,14 +990,14 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100804,t,"{key,value}")
(localhost,9060,100805,t,"{key,value}")
(localhost,9060,100806,t,"{key,value}")
(localhost,9060,100807,t,"{key,value}")
(localhost,57637,100804,t,"{key,value}")
(localhost,57637,100805,t,"{key,value}")
(localhost,57637,100806,t,"{key,value}")
(localhost,57637,100807,t,"{key,value}")
(<host>,9060,100804,t,"{key,value}")
(<host>,9060,100805,t,"{key,value}")
(<host>,9060,100806,t,"{key,value}")
(<host>,9060,100807,t,"{key,value}")
(<host>,xxxxx,100804,t,"{key,value}")
(<host>,xxxxx,100805,t,"{key,value}")
(<host>,xxxxx,100806,t,"{key,value}")
(<host>,xxxxx,100807,t,"{key,value}")
(8 rows)
-- but now kill just after the worker sends response to
@@ -1028,14 +1028,14 @@ SELECT recover_prepared_transactions();
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100804,t,"{key,value}")
(localhost,9060,100805,t,"{key,value}")
(localhost,9060,100806,t,"{key,value}")
(localhost,9060,100807,t,"{key,value}")
(localhost,57637,100804,t,"{key,value}")
(localhost,57637,100805,t,"{key,value}")
(localhost,57637,100806,t,"{key,value}")
(localhost,57637,100807,t,"{key,value}")
(<host>,9060,100804,t,"{key,value}")
(<host>,9060,100805,t,"{key,value}")
(<host>,9060,100806,t,"{key,value}")
(<host>,9060,100807,t,"{key,value}")
(<host>,xxxxx,100804,t,"{key,value}")
(<host>,xxxxx,100805,t,"{key,value}")
(<host>,xxxxx,100806,t,"{key,value}")
(<host>,xxxxx,100807,t,"{key,value}")
(8 rows)
-- now do some tests with sequential mode
@@ -1048,7 +1048,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -1081,7 +1081,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -1093,7 +1093,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").aft
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -50,16 +50,16 @@ SELECT * FROM distributed_result_info ORDER BY resultId;
resultid | nodeport | rowcount | targetshardid | targetshardindex
---------------------------------------------------------------------
test_from_100800_to_0 | 9060 | 22 | 100805 | 0
test_from_100801_to_0 | 57637 | 2 | 100805 | 0
test_from_100801_to_1 | 57637 | 15 | 100806 | 1
test_from_100801_to_0 | xxxxx | 2 | 100805 | 0
test_from_100801_to_1 | xxxxx | 15 | 100806 | 1
test_from_100802_to_1 | 9060 | 10 | 100806 | 1
test_from_100802_to_2 | 9060 | 5 | 100807 | 2
test_from_100803_to_2 | 57637 | 18 | 100807 | 2
test_from_100803_to_3 | 57637 | 4 | 100808 | 3
test_from_100803_to_2 | xxxxx | 18 | 100807 | 2
test_from_100803_to_3 | xxxxx | 4 | 100808 | 3
test_from_100804_to_3 | 9060 | 24 | 100808 | 3
(8 rows)
SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_2_port) > 0 AS fetched;
SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], '<host>', :worker_2_port) > 0 AS fetched;
fetched
---------------------------------------------------------------------
t
@@ -73,7 +73,7 @@ SELECT count(*), sum(x) FROM
(1 row)
ROLLBACk;
-- with failure, results from 100802 should be retried and succeed on 57637
-- with failure, results from 100802 should be retried and succeed on xxxxx
SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result.*test_from_100802").kill()');
mitmproxy
---------------------------------------------------------------------
@@ -85,7 +85,7 @@ CREATE TABLE distributed_result_info AS
SELECT resultId, nodeport, rowcount, targetShardId, targetShardIndex
FROM partition_task_list_results('test', $$ SELECT * FROM source_table $$, 'target_table')
NATURAL JOIN pg_dist_node;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -93,23 +93,23 @@ SELECT * FROM distributed_result_info ORDER BY resultId;
resultid | nodeport | rowcount | targetshardid | targetshardindex
---------------------------------------------------------------------
test_from_100800_to_0 | 9060 | 22 | 100805 | 0
test_from_100801_to_0 | 57637 | 2 | 100805 | 0
test_from_100801_to_1 | 57637 | 15 | 100806 | 1
test_from_100802_to_1 | 57637 | 10 | 100806 | 1
test_from_100802_to_2 | 57637 | 5 | 100807 | 2
test_from_100803_to_2 | 57637 | 18 | 100807 | 2
test_from_100803_to_3 | 57637 | 4 | 100808 | 3
test_from_100801_to_0 | xxxxx | 2 | 100805 | 0
test_from_100801_to_1 | xxxxx | 15 | 100806 | 1
test_from_100802_to_1 | xxxxx | 10 | 100806 | 1
test_from_100802_to_2 | xxxxx | 5 | 100807 | 2
test_from_100803_to_2 | xxxxx | 18 | 100807 | 2
test_from_100803_to_3 | xxxxx | 4 | 100808 | 3
test_from_100804_to_3 | 9060 | 24 | 100808 | 3
(8 rows)
-- fetch from worker 2 should fail
SAVEPOINT s1;
SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_2_port) > 0 AS fetched;
SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], '<host>', :worker_2_port) > 0 AS fetched;
ERROR: could not open file "base/pgsql_job_cache/xx_x_xxx/test_from_100802_to_1.data": No such file or directory
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ROLLBACK TO SAVEPOINT s1;
-- fetch from worker 1 should succeed
SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_1_port) > 0 AS fetched;
SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], '<host>', :worker_1_port) > 0 AS fetched;
fetched
---------------------------------------------------------------------
t

View File

@@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown"
(1 row)
INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -98,7 +98,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown"
(1 row)
INSERT INTO events_table SELECT * FROM events_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -54,7 +54,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill
(1 row)
INSERT INTO target_table SELECT * FROM source_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -70,11 +70,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill
(1 row)
INSERT INTO target_table SELECT * FROM replicated_source_table;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -106,7 +106,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="fetch_intermediate_results").kill()'
(1 row)
INSERT INTO target_table SELECT * FROM source_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -122,7 +122,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="fetch_intermediate_results").kill()'
(1 row)
INSERT INTO target_table SELECT * FROM replicated_source_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -143,7 +143,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()')
(1 row)
INSERT INTO target_table SELECT * FROM source_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -159,7 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()')
(1 row)
INSERT INTO target_table SELECT * FROM replicated_source_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -179,7 +179,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()')
(1 row)
INSERT INTO replicated_target_table SELECT * FROM source_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -56,7 +56,7 @@ INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_tab
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
mitmproxy
@@ -68,7 +68,7 @@ INSERT INTO events_summary SELECT event_id, event_type, count(*) FROM events_tab
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
mitmproxy
@@ -112,7 +112,7 @@ INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
mitmproxy
@@ -124,7 +124,7 @@ INSERT INTO events_reference SELECT event_type, count(*) FROM events_table GROUP
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
mitmproxy
@@ -170,7 +170,7 @@ INSERT INTO events_reference_distributed SELECT * FROM events_reference;
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- kill data push
SELECT citus.mitmproxy('conn.onQuery(query="^COPY coordinator_insert_select").kill()');
mitmproxy
@@ -182,7 +182,7 @@ INSERT INTO events_reference_distributed SELECT * FROM events_reference;
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- cancel coordinator pull query
SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')');
mitmproxy

View File

@@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE").kill()');
BEGIN;
DELETE FROM dml_test WHERE id = 1;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -96,7 +96,7 @@ BEGIN;
DELETE FROM dml_test WHERE id = 1;
DELETE FROM dml_test WHERE id = 2;
INSERT INTO dml_test VALUES (5, 'Epsilon');
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -154,7 +154,7 @@ DELETE FROM dml_test WHERE id = 1;
DELETE FROM dml_test WHERE id = 2;
INSERT INTO dml_test VALUES (5, 'Epsilon');
UPDATE dml_test SET name = 'alpha' WHERE id = 1;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -208,7 +208,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()');
-- error message that is caused during commit.
-- we'll test for the txn side-effects to ensure it didn't run
SELECT master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY['<host>']::text[],
ARRAY[:master_port]::int[],
ARRAY['
BEGIN;
@@ -223,7 +223,7 @@ COMMIT;
);
master_run_on_worker
---------------------------------------------------------------------
(localhost,57636,t,BEGIN)
(<host>,xxxxx,t,BEGIN)
(1 row)
SELECT citus.mitmproxy('conn.allow()');
@@ -392,10 +392,10 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1;
UPDATE dml_test SET name = 'gamma' WHERE id = 3;
COMMIT;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
--- should see all changes, but they only went to one placement (other is unhealthy)
SELECT * FROM dml_test ORDER BY id ASC;
id | name
@@ -444,7 +444,7 @@ UPDATE dml_test SET name = 'alpha' WHERE id = 1;
UPDATE dml_test SET name = 'gamma' WHERE id = 3;
COMMIT;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
--- shouldn't see any changes after failed COMMIT
SELECT * FROM dml_test ORDER BY id ASC;
id | name

View File

@@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3);
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -58,7 +58,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO distributed_table VALUES (1,7), (5,8);
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -73,7 +73,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO distributed_table VALUES (1,11), (6,12);
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -93,7 +93,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()');
(1 row)
INSERT INTO distributed_table VALUES (1,15), (6,16);
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -113,7 +113,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO distributed_table VALUES (2,19),(1,20);
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
-- issue a multi shard delete
DELETE FROM t2 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -82,7 +82,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").
(1 row)
DELETE FROM t2 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -145,7 +145,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
-- issue a multi shard update
UPDATE t2 SET c = 4 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -164,7 +164,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill(
(1 row)
UPDATE t2 SET c = 4 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -221,7 +221,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
-- issue a multi shard delete
DELETE FROM t2 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -240,7 +240,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005").
(1 row)
DELETE FROM t2 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -303,7 +303,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
-- issue a multi shard update
UPDATE t2 SET c = 4 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -322,7 +322,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill(
(1 row)
UPDATE t2 SET c = 4 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -396,7 +396,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
(1 row)
DELETE FROM r1 WHERE a = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -414,7 +414,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()');
(1 row)
DELETE FROM t2 WHERE b = 2;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -465,7 +465,7 @@ RETURNING *;
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
--- verify nothing is updated
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
@@ -496,7 +496,7 @@ UPDATE t3 SET c = q.c FROM (
SELECT b, max(c) as c FROM t2 GROUP BY b) q
WHERE t3.b = q.b
RETURNING *;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -552,7 +552,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill(
(1 row)
UPDATE t3 SET b = 2 WHERE b = 1;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -587,7 +587,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
-- following will fail
UPDATE t3 SET b = 2 WHERE b = 1;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -606,7 +606,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
(1 row)
UPDATE t3 SET b = 1 WHERE b = 2 RETURNING *;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -626,7 +626,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
(1 row)
UPDATE t3 SET b = 2 WHERE b = 1;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -661,7 +661,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO
-- following will fail
UPDATE t3 SET b = 2 WHERE b = 1;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()');
mitmproxy
@@ -44,11 +44,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- Failure to drop all tables in pg_dist_partition
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')');
mitmproxy
@@ -56,7 +56,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_tabl
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()');
mitmproxy
@@ -64,11 +64,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_tabl
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- Failure to truncate pg_dist_node in the worker
SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')');
mitmproxy
@@ -76,7 +76,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").can
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()');
mitmproxy
@@ -84,11 +84,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kil
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- Failure to populate pg_dist_node in the worker
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel(' || :pid || ')');
mitmproxy
@@ -96,7 +96,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").cancel('
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: canceling statement due to user request
SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()');
mitmproxy
@@ -104,11 +104,11 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO pg_dist_node").kill()')
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- Verify that coordinator knows worker does not have valid metadata
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
hasmetadata
@@ -123,7 +123,7 @@ SELECT citus.mitmproxy('conn.allow()');
(1 row)
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SELECT start_metadata_sync_to_node('<host>', :worker_2_proxy_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
@@ -147,7 +147,7 @@ SELECT create_distributed_table('t2', 'id');
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.onParse(query="^INSERT INTO pg_dist_shard").cancel(' || :pid || ')');
mitmproxy
---------------------------------------------------------------------

View File

@@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO ref_table VALUES (5, 6);
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -51,7 +51,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
(1 row)
UPDATE ref_table SET key=7 RETURNING value;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -71,7 +71,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
BEGIN;
DELETE FROM ref_table WHERE key=5;
UPDATE ref_table SET key=value;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -29,7 +29,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO partitioned_table VALUES (0, 0);
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -39,15 +39,15 @@ BEGIN;
INSERT INTO artists VALUES (5, 'Asher Lev');
SAVEPOINT s1;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: connection error: localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: connection not open
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
DELETE FROM artists WHERE id=4;
ERROR: current transaction is aborted, commands ignored until end of transaction block
RELEASE SAVEPOINT s1;
@@ -73,17 +73,17 @@ DELETE FROM artists WHERE id=4;
RELEASE SAVEPOINT s1;
WARNING: AbortSubTransaction while in COMMIT state
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: connection error: localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: connection not open
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: savepoint "savepoint_2" does not exist
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ROLLBACK;
SELECT * FROM artists WHERE id IN (4, 5);
id | name
@@ -104,9 +104,9 @@ SAVEPOINT s1;
DELETE FROM artists WHERE id=4;
ROLLBACK TO SAVEPOINT s1;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COMMIT;
ERROR: could not make changes to shard xxxxx on any node
SELECT * FROM artists WHERE id IN (4, 5);
@@ -131,15 +131,15 @@ INSERT INTO artists VALUES (5, 'Jacob Kahn');
RELEASE SAVEPOINT s2;
WARNING: AbortSubTransaction while in COMMIT state
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: connection error: localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: connection not open
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COMMIT;
SELECT * FROM artists WHERE id IN (4, 5);
id | name
@@ -162,9 +162,9 @@ SAVEPOINT s2;
DELETE FROM artists WHERE id=5;
ROLLBACK TO SAVEPOINT s2;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
COMMIT;
ERROR: could not make changes to shard xxxxx on any node
SELECT * FROM artists WHERE id IN (4, 5);
@@ -213,7 +213,7 @@ ROLLBACK TO SAVEPOINT s1;
WARNING: connection not open
WARNING: connection not open
WARNING: connection not open
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
WARNING: connection not open
WARNING: connection not open
COMMIT;
@@ -248,7 +248,7 @@ BEGIN;
INSERT INTO researchers VALUES (7, 4, 'Jan Plaza');
SAVEPOINT s1;
WARNING: connection not open
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
WARNING: connection not open
WARNING: connection not open
ERROR: connection not open
@@ -290,7 +290,7 @@ WARNING: connection not open
WARNING: connection not open
RELEASE SAVEPOINT s1;
COMMIT;
ERROR: failure on connection marked as essential: localhost:xxxxx
ERROR: failure on connection marked as essential: <host>:xxxxx
-- should see correct results from healthy placement and one bad placement
SELECT * FROM researchers WHERE lab_id = 4;
id | lab_id | name
@@ -321,7 +321,7 @@ ROLLBACK TO s1;
RELEASE SAVEPOINT s1;
WARNING: AbortSubTransaction while in COMMIT state
WARNING: connection not open
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
WARNING: connection not open
WARNING: connection not open
WARNING: savepoint "savepoint_3" does not exist

View File

@@ -5,13 +5,13 @@ SELECT citus.mitmproxy('conn.allow()');
(1 row)
-- add the workers
SELECT master_add_node('localhost', :worker_1_port);
SELECT master_add_node('<host>', :worker_1_port);
master_add_node
---------------------------------------------------------------------
1
(1 row)
SELECT master_add_node('localhost', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker
SELECT master_add_node('<host>', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker
master_add_node
---------------------------------------------------------------------
2

View File

@@ -27,7 +27,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()');
(1 row)
INSERT INTO mod_test VALUES (2, 6);
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -63,7 +63,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()');
(1 row)
UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -102,7 +102,7 @@ INSERT INTO mod_test VALUES (2, 6);
INSERT INTO mod_test VALUES (2, 7);
DELETE FROM mod_test WHERE key=2 AND value = '7';
UPDATE mod_test SET value='ok' WHERE key=2;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
(1 row)
SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -38,7 +38,7 @@ DETAIL: server closed the connection unexpectedly
(1 row)
SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -57,7 +57,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT").kill()');
BEGIN;
INSERT INTO select_test VALUES (3, 'more data');
SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -69,7 +69,7 @@ DETAIL: server closed the connection unexpectedly
INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -160,7 +160,7 @@ SELECT * FROM select_test WHERE key = 3;
INSERT INTO select_test VALUES (3, 'even more data');
SELECT * FROM select_test WHERE key = 3;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -187,7 +187,7 @@ SELECT recover_prepared_transactions();
ERROR: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- bug from https://github.com/citusdata/citus/issues/1926
SET citus.max_cached_conns_per_worker TO 0; -- purge cache
DROP TABLE select_test;
@@ -215,7 +215,7 @@ SELECT * FROM select_test WHERE key = 1;
(1 row)
SELECT * FROM select_test WHERE key = 1;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -102,7 +102,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -159,7 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -284,15 +284,15 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^COMMIT").kill()');
TRUNCATE test_table;
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: failed to commit transaction on <host>:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
WARNING: could not commit transaction for shard xxxxx on any active node
SELECT citus.mitmproxy('conn.allow()');
@@ -365,7 +365,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="PREPARE TRANSACTION").kill()');
TRUNCATE reference_table CASCADE;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@@ -433,7 +433,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()');
(1 row)
TRUNCATE reference_table CASCADE;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -506,7 +506,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
TRUNCATE reference_table CASCADE;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@@ -577,7 +577,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -635,7 +635,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -692,7 +692,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.tes
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -750,7 +750,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").k
TRUNCATE test_table;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
@@ -956,7 +956,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()');
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -1014,7 +1014,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -1071,7 +1071,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test
(1 row)
TRUNCATE test_table;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -1129,7 +1129,7 @@ SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").ki
TRUNCATE test_table;
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------

View File

@@ -31,7 +31,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
(1 row)
VACUUM vacuum_test;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -42,7 +42,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
(1 row)
ANALYZE vacuum_test;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -111,7 +111,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()');
(1 row)
VACUUM vacuum_test, other_vacuum_test;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -31,7 +31,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()');
(1 row)
VACUUM vacuum_test;
ERROR: connection error: localhost:xxxxx
ERROR: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
@@ -42,7 +42,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()');
(1 row)
ANALYZE vacuum_test;
WARNING: connection error: localhost:xxxxx
WARNING: connection error: <host>:xxxxx
DETAIL: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.

View File

@@ -473,7 +473,7 @@ DETAIL: Reference relation "transitive_reference_table" is modified, which migh
UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 1;
ERROR: insert or update on table "on_update_fkey_table_xxxxxxx" violates foreign key constraint "fkey_xxxxxxx"
DETAIL: Key (value_1)=(101) is not present in table "reference_table_2380001".
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 2;
ERROR: current transaction is aborted, commands ignored until end of transaction block
UPDATE on_update_fkey_table SET value_1 = 101 WHERE id = 3;

View File

@@ -454,7 +454,7 @@ ALTER TABLE referencing_table ADD CONSTRAINT fkey_ref FOREIGN KEY (ref_id) REFER
INSERT INTO referencing_table VALUES(1, 1);
ERROR: insert or update on table "referencing_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx"
DETAIL: Key (ref_id)=(X) is not present in table "referenced_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- test insert to referencing while there is corresponding value in referenced table
INSERT INTO referenced_table SELECT x, x from generate_series(1,1000) as f(x);
INSERT INTO referencing_table SELECT x, x from generate_series(1,500) as f(x);
@@ -463,7 +463,7 @@ INSERT INTO referencing_table SELECT x, x from generate_series(1,500) as f(x);
DELETE FROM referenced_table WHERE id > 3;
ERROR: update or delete on table "referenced_table_xxxxxxx" violates foreign key constraint "fkey_ref_xxxxxxx" on table "referencing_table_xxxxxxx"
DETAIL: Key (id)=(X) is still referenced from table "referencing_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- test delete from referenced table while there is NO corresponding value in referencing table
DELETE FROM referenced_table WHERE id = 501;
-- test cascading truncate
@@ -1570,7 +1570,7 @@ INSERT INTO test_table_2 VALUES (4,2147483648);
-- should fail since there is a bigint out of integer range > (2^32 - 1)
ALTER TABLE test_table_2 ALTER COLUMN value_1 SET DATA TYPE int;
ERROR: integer out of range
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
SELECT count(*) FROM table_fkeys_in_workers WHERE relid LIKE 'fkey_reference_table.%' AND refd_relid LIKE 'fkey_reference_table.%';
count
---------------------------------------------------------------------
@@ -1816,7 +1816,7 @@ ALTER TABLE referencing_table_4 ADD CONSTRAINT fkey_to_ref FOREIGN KEY (value_1)
INSERT INTO referencing_table VALUES (0, 5);
ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_xxxxxxx"
DETAIL: Key (id)=(X) is not present in table "referencing_table_0_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
-- should succeed on partitioning_test_0
INSERT INTO referencing_table VALUES (0, 1);
SELECT * FROM referencing_table;
@@ -1829,7 +1829,7 @@ SELECT * FROM referencing_table;
INSERT INTO referencing_table VALUES (0, 5);
ERROR: insert or update on table "referencing_table_4_7000540" violates foreign key constraint "fkey_to_ref_7000540"
DETAIL: Key (value_1)=(5) is not present in table "referenced_table_xxxxxxx".
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
INSERT INTO referenced_table VALUES(5,5);
-- should succeed since both of the foreign constraints are positive
INSERT INTO referencing_table VALUES (0, 5);

View File

@@ -62,14 +62,14 @@ SELECT create_distributed_table('another_dist_schema.dist_table', 'id');
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
nspname | nspacl
---------------------------------------------------------------------
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U/<user>,role_3=U*C/role_1,=UC/role_1,=U/<user>}
(1 row)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
nspname | nspacl
---------------------------------------------------------------------
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres}
dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U/<user>,role_3=U*C/role_1,=UC/role_1,=U/<user>}
(1 row)
\c - - - :master_port
@@ -78,17 +78,17 @@ GRANT ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1,
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
non_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
another_dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U*C*/<user>,role_3=U*C*/<user>}
dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U*C*/<user>,role_3=U*C/role_1,=UC/role_1,=U/<user>,role_3=U*C*/<user>}
non_dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U*C*/<user>,role_3=U*C*/<user>}
(3 rows)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C*/postgres}
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/postgres,role_3=U*C/role_1,=UC/role_1,=U/postgres,role_3=U*C*/postgres}
another_dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U*C*/<user>,role_3=U*C*/<user>}
dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U*C*/<user>,role_3=U*C/role_1,=UC/role_1,=U/<user>,role_3=U*C*/<user>}
(2 rows)
\c - - - :master_port
@@ -97,17 +97,17 @@ REVOKE ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres}
dist_schema | {postgres=UC/postgres}
non_dist_schema | {postgres=UC/postgres}
another_dist_schema | {<user>=UC/<user>}
dist_schema | {<user>=UC/<user>}
non_dist_schema | {<user>=UC/<user>}
(3 rows)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres}
dist_schema | {postgres=UC/postgres}
another_dist_schema | {<user>=UC/<user>}
dist_schema | {<user>=UC/<user>}
(2 rows)
\c - - - :master_port
@@ -116,17 +116,17 @@ GRANT USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
non_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
another_dist_schema | {<user>=UC/<user>,role_1=UC/<user>,role_2=UC/<user>,role_3=UC/<user>}
dist_schema | {<user>=UC/<user>,role_1=UC/<user>,role_2=UC/<user>,role_3=UC/<user>}
non_dist_schema | {<user>=UC/<user>,role_1=UC/<user>,role_2=UC/<user>,role_3=UC/<user>}
(3 rows)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
dist_schema | {postgres=UC/postgres,role_1=UC/postgres,role_2=UC/postgres,role_3=UC/postgres}
another_dist_schema | {<user>=UC/<user>,role_1=UC/<user>,role_2=UC/<user>,role_3=UC/<user>}
dist_schema | {<user>=UC/<user>,role_1=UC/<user>,role_2=UC/<user>,role_3=UC/<user>}
(2 rows)
\c - - - :master_port
@@ -135,17 +135,17 @@ REVOKE USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
non_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
another_dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
non_dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
(3 rows)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
another_dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
(2 rows)
\c - - - :master_port
@@ -155,8 +155,8 @@ GRANT USAGE ON SCHEMA dist_schema TO role_1, role_3 WITH GRANT OPTION;
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
dist_schema | {postgres=UC/postgres,role_3=U*C/postgres,role_1=U*/postgres}
another_dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
dist_schema | {<user>=UC/<user>,role_3=U*C/<user>,role_1=U*/<user>}
(2 rows)
\c - - - :master_port
@@ -166,8 +166,8 @@ REVOKE GRANT OPTION FOR USAGE ON SCHEMA dist_schema FROM role_3;
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*/postgres}
another_dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
dist_schema | {<user>=UC/<user>,role_3=UC/<user>,role_1=U*/<user>}
(2 rows)
\c - - - :master_port
@@ -181,8 +181,8 @@ GRANT CREATE ON SCHEMA dist_schema TO CURRENT_USER;
SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
another_dist_schema | {postgres=UC/postgres,role_3=UC/postgres}
dist_schema | {postgres=UC/postgres,role_3=UC/postgres,role_1=U*C/postgres}
another_dist_schema | {<user>=UC/<user>,role_3=UC/<user>}
dist_schema | {<user>=UC/<user>,role_3=UC/<user>,role_1=U*C/<user>}
(2 rows)
\c - - - :master_port
@@ -212,7 +212,7 @@ SELECT run_command_on_coordinator_and_workers('DROP SCHEMA non_dist_schema');
-- test if the grantors are propagated correctly
-- first remove one of the worker nodes
SET citus.shard_replication_factor TO 1;
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node('<host>', :worker_2_port);
master_remove_node
---------------------------------------------------------------------
@@ -243,19 +243,19 @@ SELECT create_distributed_table('grantor_schema.grantor_table', 'id');
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
grantor_schema | {<user>=UC/<user>,role_1=U*C*/<user>,=C/<user>,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
(1 row)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
grantor_schema | {<user>=UC/<user>,role_1=U*C*/<user>,=C/<user>,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
(1 row)
\c - - - :master_port
-- add the previously removed node
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node('<host>', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@@ -265,14 +265,14 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
grantor_schema | {<user>=UC/<user>,role_1=U*C*/<user>,=C/<user>,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
(1 row)
\c - - - :worker_2_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
grantor_schema | {<user>=UC/<user>,role_1=U*C*/<user>,=C/<user>,role_2=U*C/role_1,role_3=UC/role_1,=UC/role_1,role_3=U/role_2}
(1 row)
\c - - - :master_port
@@ -282,14 +282,14 @@ REVOKE USAGE ON SCHEMA grantor_schema FROM role_1 CASCADE;
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
grantor_schema | {<user>=UC/<user>,role_1=C*/<user>,=C/<user>,role_2=C/role_1,role_3=C/role_1,=C/role_1}
(1 row)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=C*/postgres,=C/postgres,role_2=C/role_1,role_3=C/role_1,=C/role_1}
grantor_schema | {<user>=UC/<user>,role_1=C*/<user>,=C/<user>,role_2=C/role_1,role_3=C/role_1,=C/role_1}
(1 row)
\c - - - :master_port
@@ -305,14 +305,14 @@ RESET ROLE;
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
grantor_schema | {<user>=UC/<user>,role_1=U*C*/<user>,=C/<user>,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
(1 row)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
grantor_schema | {postgres=UC/postgres,role_1=U*C*/postgres,=C/postgres,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
grantor_schema | {<user>=UC/<user>,role_1=U*C*/<user>,=C/<user>,role_2=UC/role_1,role_3=U*C/role_1,=C/role_1,role_2=U/role_3}
(1 row)
\c - - - :master_port
@@ -338,14 +338,14 @@ SELECT create_distributed_table('dist_schema.dist_table', 'id');
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U*C*/role_1}
(1 row)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
dist_schema | {postgres=UC/postgres,role_1=U*C*/postgres,role_2=U*C*/role_1}
dist_schema | {<user>=UC/<user>,role_1=U*C*/<user>,role_2=U*C*/role_1}
(1 row)
\c - - - :master_port
@@ -359,7 +359,7 @@ SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE')
-- test grants on public schema
-- first remove one of the worker nodes
SET citus.shard_replication_factor TO 1;
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node('<host>', :worker_2_port);
master_remove_node
---------------------------------------------------------------------
@@ -382,19 +382,19 @@ RESET ROLE;
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
public | {<user>=UC/<user>,=UC/<user>,role_1=U*C*/<user>,=U/role_1}
(1 row)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
public | {<user>=UC/<user>,=UC/<user>,role_1=U*C*/<user>,=U/role_1}
(1 row)
\c - - - :master_port
-- add the previously removed node
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node('<host>', :worker_2_port);
?column?
---------------------------------------------------------------------
1
@@ -404,14 +404,14 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
public | {<user>=UC/<user>,=UC/<user>,role_1=U*C*/<user>,=U/role_1}
(1 row)
\c - - - :worker_2_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
public | {postgres=UC/postgres,=UC/postgres,role_1=U*C*/postgres,=U/role_1}
public | {<user>=UC/<user>,=UC/<user>,role_1=U*C*/<user>,=U/role_1}
(1 row)
\c - - - :master_port
@@ -421,14 +421,14 @@ REVOKE CREATE, USAGE ON SCHEMA PUBLIC FROM role_1 CASCADE;
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
public | {postgres=UC/postgres,=UC/postgres}
public | {<user>=UC/<user>,=UC/<user>}
(1 row)
\c - - - :worker_1_port
SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
nspname | nspacl
---------------------------------------------------------------------
public | {postgres=UC/postgres,=UC/postgres}
public | {<user>=UC/<user>,=UC/<user>}
(1 row)
\c - - - :master_port


@@ -33,7 +33,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT * FROM source_table;
Task Count: 64
Tasks Shown: One of 64
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on source_table_4213581 source_table
(8 rows)


@@ -567,7 +567,7 @@ EXPLAIN INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate (cost=43.90..45.90 rows=200 width=8)
Group Key: a
-> Seq Scan on source_table_4213606 source_table (cost=0.00..32.60 rows=2260 width=8)
@@ -747,7 +747,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT * FROM source_table;
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on source_table_4213613 source_table
(8 rows)
@@ -760,7 +760,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT * FROM source_table WHERE b
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on source_table_4213613 source_table
Filter: (b IS NOT NULL)
(9 rows)
@@ -791,14 +791,14 @@ SELECT * FROM target_table ORDER BY b;
SELECT * FROM run_command_on_placements('target_table', 'select count(*) from %s') ORDER BY shardid, nodeport;
nodename | nodeport | shardid | success | result
---------------------------------------------------------------------
localhost | 57637 | 4213617 | t | 1
localhost | 57638 | 4213617 | t | 1
localhost | 57637 | 4213618 | t | 2
localhost | 57638 | 4213618 | t | 2
localhost | 57637 | 4213619 | t | 3
localhost | 57638 | 4213619 | t | 3
localhost | 57637 | 4213620 | t | 4
localhost | 57638 | 4213620 | t | 4
<host> | xxxxx | 4213617 | t | 1
<host> | xxxxx | 4213617 | t | 1
<host> | xxxxx | 4213618 | t | 2
<host> | xxxxx | 4213618 | t | 2
<host> | xxxxx | 4213619 | t | 3
<host> | xxxxx | 4213619 | t | 3
<host> | xxxxx | 4213620 | t | 4
<host> | xxxxx | 4213620 | t | 4
(8 rows)
--
@@ -885,14 +885,14 @@ SELECT * FROM target_table ORDER BY a;
SELECT * FROM run_command_on_placements('target_table', 'select count(*) from %s') ORDER BY shardid, nodeport;
nodename | nodeport | shardid | success | result
---------------------------------------------------------------------
localhost | 57637 | 4213625 | t | 2
localhost | 57638 | 4213625 | t | 2
localhost | 57637 | 4213626 | t | 3
localhost | 57638 | 4213626 | t | 3
localhost | 57637 | 4213627 | t | 3
localhost | 57638 | 4213627 | t | 3
localhost | 57637 | 4213628 | t | 2
localhost | 57638 | 4213628 | t | 2
<host> | xxxxx | 4213625 | t | 2
<host> | xxxxx | 4213625 | t | 2
<host> | xxxxx | 4213626 | t | 3
<host> | xxxxx | 4213626 | t | 3
<host> | xxxxx | 4213627 | t | 3
<host> | xxxxx | 4213627 | t | 3
<host> | xxxxx | 4213628 | t | 2
<host> | xxxxx | 4213628 | t | 2
(8 rows)
DROP TABLE source_table, target_table;
@@ -947,7 +947,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT a AS aa, b AS aa, 1 AS aa, 2
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on source_table_4213629 source_table
(8 rows)
@@ -973,7 +973,7 @@ EXPLAIN (costs off) INSERT INTO target_table SELECT a AS aa, b AS aa, 1 AS aa, 2
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> Seq Scan on source_table_4213629 source_table
(8 rows)
@@ -1149,7 +1149,7 @@ DO UPDATE SET
Task Count: 4
Tasks Shown: One of 4
-> Task
Node: host=localhost port=xxxxx dbname=regression
Node: host=<host> port=xxxxx dbname=<db>
-> HashAggregate
Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
-> Seq Scan on source_table_4213644 source_table


@@ -51,8 +51,8 @@ FROM
some_values_1 JOIN table_2 USING (key);
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
2
@@ -60,7 +60,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
-- a very basic case, where the intermediate result
-- should only go to one worker because the final query is a router
-- we use random() to prevent postgres inline the CTE(s)
-- we use random() to prevent <user> inline the CTE(s)
WITH some_values_1 AS
(SELECT key, random() FROM table_1 WHERE value IN ('3', '4'))
SELECT
@@ -69,7 +69,7 @@ FROM
some_values_1 JOIN table_2 USING (key) WHERE table_2.key = 1;
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0
@@ -86,8 +86,8 @@ FROM
some_values_1 JOIN ref_table USING (key);
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
2
@@ -107,7 +107,7 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT key, random() AS random FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0
@@ -126,9 +126,9 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
1
@@ -148,9 +148,9 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
1
@@ -170,9 +170,9 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0
@@ -193,8 +193,8 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 1)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0
@@ -212,10 +212,10 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
1
@@ -235,10 +235,10 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 3)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.<>) 3)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0
@@ -255,8 +255,8 @@ FROM
(some_values_1 JOIN ref_table USING (key)) JOIN table_2 USING (key);
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.ref_table WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
2
@@ -290,7 +290,7 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be written to local file
count
---------------------------------------------------------------------
@@ -318,9 +318,9 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be written to local file
count
---------------------------------------------------------------------
@@ -348,8 +348,8 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT DISTINCT key FROM (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) top_cte JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (table_2.key OPERATOR(pg_catalog.=) 2)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be written to local file
count
---------------------------------------------------------------------
@@ -369,12 +369,12 @@ DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS
DEBUG: generating subplan XXX_2 for CTE some_values_2: SELECT some_values_1.key, random() AS random FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.table_2 USING (key)) WHERE (some_values_1.key OPERATOR(pg_catalog.=) 1)
DEBUG: generating subplan XXX_3 for CTE some_values_3: SELECT some_values_2.key FROM (((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_2 JOIN intermediate_result_pruning.table_2 USING (key)) JOIN (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 USING (key))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT some_values_3.key, ref_table.key, ref_table.value FROM ((SELECT intermediate_result.key FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) some_values_3 JOIN intermediate_result_pruning.ref_table ON (true))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
key | key | value
---------------------------------------------------------------------
(0 rows)
@@ -482,13 +482,13 @@ DEBUG: generating subplan XXX_5 for subquery SELECT min(table_1.value) AS min F
DEBUG: generating subplan XXX_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) GROUP BY table_1.value
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_4 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_4 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_5 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_5 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_6 will be written to local file
count
---------------------------------------------------------------------
@@ -541,10 +541,10 @@ DEBUG: generating subplan XXX_5 for subquery SELECT min(table_1.value) AS min F
DEBUG: generating subplan XXX_6 for subquery SELECT avg((level_6.min)::integer) AS avg FROM (SELECT intermediate_result.min FROM read_intermediate_result('XXX_5'::text, 'binary'::citus_copy_format) intermediate_result(min text)) level_6, intermediate_result_pruning.table_1 WHERE ((table_1.key OPERATOR(pg_catalog.=) (level_6.min)::integer) AND (table_1.key OPERATOR(pg_catalog.=) 4)) GROUP BY table_1.value
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.avg FROM read_intermediate_result('XXX_6'::text, 'binary'::citus_copy_format) intermediate_result(avg numeric)) bar
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_4 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_5 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_4 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_5 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_6 will be written to local file
count
---------------------------------------------------------------------
@@ -616,8 +616,8 @@ DEBUG: generating subplan XXX_2 for subquery SELECT key FROM intermediate_resul
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer) INTERSECT SELECT intermediate_result.key FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer)
DEBUG: generating subplan XXX_2 for CTE cte_2: SELECT count(*) AS count FROM (intermediate_result_pruning.table_1 JOIN (SELECT intermediate_result.key FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer)) cte_1 USING (key))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count FROM (SELECT intermediate_result.count FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) cte_2
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
@@ -638,8 +638,8 @@ WHERE
foo.key != bar.key;
DEBUG: generating subplan XXX_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
14
@@ -656,7 +656,7 @@ WHERE
foo.key != bar.key;
DEBUG: generating subplan XXX_1 for subquery SELECT key, random() AS random FROM intermediate_result_pruning.table_2
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT table_1.key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (table_1.key OPERATOR(pg_catalog.=) 1)) foo, (SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) bar WHERE (foo.key OPERATOR(pg_catalog.<>) bar.key)
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
4
@@ -682,8 +682,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermed
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
key | value
---------------------------------------------------------------------
3 | 3
@@ -712,8 +712,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermed
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
key | value
---------------------------------------------------------------------
3 | 3
@@ -738,7 +738,7 @@ DEBUG: generating subplan XXX_1 for subquery SELECT min(key) AS min FROM interm
DEBUG: Plan XXX query after replacing subqueries and CTEs: DELETE FROM intermediate_result_pruning.table_2 WHERE (((value)::integer OPERATOR(pg_catalog.>=) (SELECT intermediate_result.min FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min integer))) AND (key OPERATOR(pg_catalog.=) 6)) RETURNING key, value
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM (SELECT intermediate_result.key, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, value text)) raw_data
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
key | value
---------------------------------------------------------------------
6 | 6
@@ -760,7 +760,7 @@ DEBUG: volatile functions are not allowed in distributed INSERT ... SELECT quer
DEBUG: generating subplan XXX_1 for subquery SELECT value FROM intermediate_result_pruning.table_1 WHERE (random() OPERATOR(pg_catalog.>) (1)::double precision)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT key, value FROM intermediate_result_pruning.table_2 WHERE ((value OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value text))) AND (key OPERATOR(pg_catalog.=) 1))
DEBUG: Collecting INSERT ... SELECT results on coordinator
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
-- a similar query, with more complex subquery
INSERT INTO table_1
SELECT * FROM table_2 where key = 1 AND
@@ -793,7 +793,7 @@ DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
-- same query, cte is on the FROM clause
-- and this time the final query (and top-level intermediate result)
-- hits all the shards because table_2.key != 1
@@ -830,8 +830,8 @@ DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_1 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_3 will be sent to <host>:xxxxx
-- append partitioned/heap-type
SET citus.replication_model TO statement;
-- do not print out 'building index pg_toast_xxxxx_index' messages
@@ -889,7 +889,7 @@ WHERE
data IN (SELECT data FROM range_partitioned);
DEBUG: generating subplan XXX_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) 'A'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0
@@ -905,8 +905,8 @@ WHERE
data IN (SELECT data FROM range_partitioned);
DEBUG: generating subplan XXX_1 for subquery SELECT data FROM intermediate_result_pruning.range_partitioned
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.>=) 'A'::text) AND (range_column OPERATOR(pg_catalog.<=) 'K'::text) AND (data OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0
@@ -925,8 +925,8 @@ WHERE
range_partitioned.data IN (SELECT data FROM some_data);
DEBUG: generating subplan XXX_1 for CTE some_data: SELECT data FROM intermediate_result_pruning.range_partitioned
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM intermediate_result_pruning.range_partitioned WHERE ((range_column OPERATOR(pg_catalog.=) ANY (ARRAY['A'::text, 'E'::text])) AND (data OPERATOR(pg_catalog.=) ANY (SELECT some_data.data FROM (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer)) some_data)))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
DEBUG: Subplan XXX_1 will be sent to <host>:xxxxx
count
---------------------------------------------------------------------
0


@@ -242,7 +242,7 @@ END;
-- pipe query output into a result file and create a table to check the result
COPY (SELECT s, s*s FROM generate_series(1,5) s)
TO PROGRAM
$$psql -h localhost -p 57636 -U postgres -d regression -c "BEGIN; COPY squares FROM STDIN WITH (format result); CREATE TABLE intermediate_results.squares AS SELECT * FROM read_intermediate_result('squares', 'text') AS res(x int, x2 int); END;"$$
$$psql -h <host> -p xxxxx -U <user> -d regression -c "BEGIN; COPY squares FROM STDIN WITH (format result); CREATE TABLE intermediate_results.squares AS SELECT * FROM read_intermediate_result('squares', 'text') AS res(x int, x2 int); END;"$$
WITH (FORMAT text);
SELECT * FROM squares ORDER BY x;
x | x2
@@ -429,7 +429,7 @@ SELECT broadcast_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_s
5
(1 row)
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_2_port);
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], '<host>', :worker_2_port);
fetch_intermediate_results
---------------------------------------------------------------------
111
@@ -445,7 +445,7 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2
5 | 25
(5 rows)
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_1_port);
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], '<host>', :worker_1_port);
fetch_intermediate_results
---------------------------------------------------------------------
111
@@ -464,14 +464,14 @@ SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2
END;
-- multiple results, and some error cases
BEGIN;
SELECT store_intermediate_result_on_node('localhost', :worker_1_port,
SELECT store_intermediate_result_on_node('<host>', :worker_1_port,
'squares_1', 'SELECT s, s*s FROM generate_series(1, 2) s');
store_intermediate_result_on_node
---------------------------------------------------------------------
(1 row)
SELECT store_intermediate_result_on_node('localhost', :worker_1_port,
SELECT store_intermediate_result_on_node('<host>', :worker_1_port,
'squares_2', 'SELECT s, s*s FROM generate_series(3, 4) s');
store_intermediate_result_on_node
---------------------------------------------------------------------
@@ -484,16 +484,16 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[],
ERROR: result "squares_1" does not exist
ROLLBACK TO SAVEPOINT s1;
-- fetch from worker 2 should fail
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_2_port);
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], '<host>', :worker_2_port);
ERROR: could not open file "base/pgsql_job_cache/xx_x_xxx/squares_1.data": No such file or directory
CONTEXT: while executing command on localhost:xxxxx
CONTEXT: while executing command on <host>:xxxxx
ROLLBACK TO SAVEPOINT s1;
-- still, results aren't available on coordinator yet
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
ERROR: result "squares_1" does not exist
ROLLBACK TO SAVEPOINT s1;
-- fetch from worker 1 should succeed
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], '<host>', :worker_1_port);
fetch_intermediate_results
---------------------------------------------------------------------
114
@@ -509,7 +509,7 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[],
(4 rows)
-- fetching again should succeed
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], '<host>', :worker_1_port);
fetch_intermediate_results
---------------------------------------------------------------------
114
@@ -526,14 +526,14 @@ SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[],
ROLLBACK TO SAVEPOINT s1;
-- empty result id list should succeed
SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], 'localhost', :worker_1_port);
SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], '<host>', :worker_1_port);
fetch_intermediate_results
---------------------------------------------------------------------
0
(1 row)
-- null in result id list should error gracefully
SELECT * FROM fetch_intermediate_results(ARRAY[NULL, 'squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
SELECT * FROM fetch_intermediate_results(ARRAY[NULL, 'squares_1', 'squares_2']::text[], '<host>', :worker_1_port);
ERROR: worker array object cannot contain null values
END;
-- results should have been deleted after transaction commit


@@ -11,7 +11,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@@ -33,8 +33,8 @@ step s2-print-content:
nodeport success result
57637 t 10
57638 t 10
xxxxx t 10
xxxxx t 10
master_remove_node
@@ -54,7 +54,7 @@ step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@@ -73,8 +73,8 @@ step s2-print-content:
nodeport success result
57637 t 10
57638 t 10
xxxxx t 10
xxxxx t 10
master_remove_node
@@ -91,7 +91,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@@ -113,8 +113,8 @@ step s2-print-content:
nodeport success result
57637 t 6
57638 t 6
xxxxx t 6
xxxxx t 6
master_remove_node
@@ -134,7 +134,7 @@ step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@@ -153,8 +153,8 @@ step s2-print-content:
nodeport success result
57637 t 6
57638 t 6
xxxxx t 6
xxxxx t 6
master_remove_node
@@ -171,7 +171,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -193,8 +193,8 @@ step s2-print-index-count:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -214,7 +214,7 @@ step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -233,8 +233,8 @@ step s2-print-index-count:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -251,7 +251,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -276,8 +276,8 @@ step s2-print-content-2:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -300,7 +300,7 @@ create_reference_table
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -319,8 +319,8 @@ step s2-print-content-2:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -334,7 +334,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -356,8 +356,8 @@ step s2-print-content:
nodeport success result
57637 t 5
57638 t 5
xxxxx t 5
xxxxx t 5
master_remove_node
@ -374,7 +374,7 @@ step s2-copy-to-reference-table:
COPY test_reference_table FROM PROGRAM 'echo 1 && echo 2 && echo 3 && echo 4 && echo 5';
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -393,8 +393,8 @@ step s2-print-content:
nodeport success result
57637 t 5
57638 t 5
xxxxx t 5
xxxxx t 5
master_remove_node
@ -408,7 +408,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -430,8 +430,8 @@ step s2-print-content:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -448,7 +448,7 @@ step s2-insert-to-reference-table:
INSERT INTO test_reference_table VALUES (6);
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -467,8 +467,8 @@ step s2-print-content:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -482,7 +482,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -504,8 +504,8 @@ step s2-print-index-count:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -522,7 +522,7 @@ step s2-ddl-on-reference-table:
CREATE INDEX reference_index ON test_reference_table(test_id);
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -541,8 +541,8 @@ step s2-print-index-count:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -556,7 +556,7 @@ step s1-begin:
BEGIN;
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -581,8 +581,8 @@ step s2-print-content-2:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
@ -602,7 +602,7 @@ create_reference_table
step s1-add-second-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -621,8 +621,8 @@ step s2-print-content-2:
nodeport success result
57637 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
master_remove_node
View File
@ -8,13 +8,13 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
1
step s2-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);
SELECT * FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -39,13 +39,13 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
1
step s2-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -59,8 +59,8 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 t
localhost 57638 t
<host> xxxxx t
<host> xxxxx t
master_remove_node
@ -74,13 +74,13 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
1
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -94,7 +94,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 t
<host> xxxxx t
master_remove_node
@ -107,13 +107,13 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
1
step s2-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s1-abort:
ABORT;
@ -127,7 +127,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57638 t
<host> xxxxx t
master_remove_node
@ -140,13 +140,13 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
1
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s1-abort:
ABORT;
@ -160,7 +160,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 t
<host> xxxxx t
master_remove_node
@ -170,13 +170,13 @@ starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-r
1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
1
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -185,13 +185,13 @@ step s1-begin:
BEGIN;
step s1-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);
SELECT * FROM master_remove_node('<host>', xxxxx);
master_remove_node
step s2-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -213,7 +213,7 @@ starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s
1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -222,19 +222,19 @@ step s1-begin:
BEGIN;
step s1-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);
SELECT * FROM master_remove_node('<host>', xxxxx);
master_remove_node
step s2-remove-node-1:
SELECT * FROM master_remove_node('localhost', 57637);
SELECT * FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
step s2-remove-node-1: <... completed>
error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:xxxxx" does not exist
error in steps s1-commit s2-remove-node-1: ERROR: node at "<host>:xxxxx" does not exist
step s1-show-nodes:
SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport;
@ -248,7 +248,7 @@ starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node
1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -257,13 +257,13 @@ step s1-begin:
BEGIN;
step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
?column?
1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -277,7 +277,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 t
<host> xxxxx t
master_remove_node
@ -287,7 +287,7 @@ starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1
1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -296,13 +296,13 @@ step s1-begin:
BEGIN;
step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
?column?
1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -316,7 +316,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 f
<host> xxxxx f
master_remove_node
@ -326,7 +326,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate-
1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
SELECT 1 FROM master_add_inactive_node('<host>', xxxxx);
?column?
@ -335,13 +335,13 @@ step s1-begin:
BEGIN;
step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
?column?
1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -355,7 +355,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 t
<host> xxxxx t
master_remove_node
@ -365,7 +365,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-no
1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
SELECT 1 FROM master_add_inactive_node('<host>', xxxxx);
?column?
@ -374,13 +374,13 @@ step s1-begin:
BEGIN;
step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
?column?
1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -394,7 +394,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 f
<host> xxxxx f
master_remove_node
@ -404,7 +404,7 @@ starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node-
1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -413,13 +413,13 @@ step s1-begin:
BEGIN;
step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
?column?
1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -433,7 +433,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 t
<host> xxxxx t
master_remove_node
@ -443,7 +443,7 @@ starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node-
1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -452,13 +452,13 @@ step s1-begin:
BEGIN;
step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
?column?
1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -472,7 +472,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 f
<host> xxxxx f
master_remove_node
@ -482,7 +482,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-n
1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
SELECT 1 FROM master_add_inactive_node('<host>', xxxxx);
?column?
@ -491,13 +491,13 @@ step s1-begin:
BEGIN;
step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
?column?
1
step s2-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -511,7 +511,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 t
<host> xxxxx t
master_remove_node
@ -521,7 +521,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-n
1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
SELECT 1 FROM master_add_inactive_node('<host>', xxxxx);
?column?
@ -530,13 +530,13 @@ step s1-begin:
BEGIN;
step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
?column?
1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -550,7 +550,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 f
<host> xxxxx f
master_remove_node
@ -560,7 +560,7 @@ starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-n
1
step s1-add-inactive-1:
SELECT 1 FROM master_add_inactive_node('localhost', 57637);
SELECT 1 FROM master_add_inactive_node('<host>', xxxxx);
?column?
@ -569,13 +569,13 @@ step s1-begin:
BEGIN;
step s1-activate-node-1:
SELECT 1 FROM master_activate_node('localhost', 57637);
SELECT 1 FROM master_activate_node('<host>', xxxxx);
?column?
1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
<waiting ...>
step s1-abort:
ABORT;
@ -589,7 +589,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 f
<host> xxxxx f
master_remove_node
@ -599,7 +599,7 @@ starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1
1
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -608,13 +608,13 @@ step s1-begin:
BEGIN;
step s1-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
?column?
1
step s2-disable-node-1:
SELECT 1 FROM master_disable_node('localhost', 57637);
SELECT 1 FROM master_disable_node('<host>', xxxxx);
<waiting ...>
step s1-abort:
ABORT;
@ -628,7 +628,7 @@ step s1-show-nodes:
nodename nodeport isactive
localhost 57637 f
<host> xxxxx f
master_remove_node
View File
@ -3,8 +3,8 @@ Parsed test spec with 2 sessions
starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-alter-role-1 s2-add-node s1-commit
run_command_on_workers
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;
@ -18,7 +18,7 @@ step s1-alter-role-1:
ALTER ROLE alter_role_1 NOSUPERUSER;
step s2-add-node:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -29,14 +29,14 @@ step s2-add-node: <... completed>
1
run_command_on_workers
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-add-node s2-alter-role-1 s1-commit
run_command_on_workers
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;
@ -47,7 +47,7 @@ step s1-begin:
BEGIN;
step s1-add-node:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -61,14 +61,14 @@ step s1-commit:
step s2-alter-role-1: <... completed>
run_command_on_workers
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-alter-role-1 s2-alter-role-1 s1-commit
run_command_on_workers
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;
@ -91,14 +91,14 @@ step s2-alter-role-1: <... completed>
error in steps s1-commit s2-alter-role-1: ERROR: tuple concurrently updated
run_command_on_workers
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
starting permutation: s1-enable-propagation s2-enable-propagation s1-begin s1-alter-role-1 s2-alter-role-2 s1-commit
run_command_on_workers
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
(<host>,xxxxx,t,"CREATE ROLE")
step s1-enable-propagation:
SET citus.enable_alter_role_propagation to ON;
@ -119,5 +119,5 @@ step s1-commit:
run_command_on_workers
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
(<host>,xxxxx,t,"DROP ROLE")
View File
@ -177,8 +177,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -198,8 +198,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -218,8 +218,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -238,8 +238,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -259,8 +259,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -279,8 +279,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
@ -518,8 +518,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -539,8 +539,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -560,8 +560,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -581,8 +581,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -601,8 +601,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table
View File
@ -34,7 +34,7 @@ query query_hostname query_hostport master_query_host_namemaster_query_
ALTER TABLE test_table ADD COLUMN x INT;
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
@ -42,16 +42,16 @@ query query_hostname query_hostport master_query_host_namemaster_query_
SELECT worker_apply_shard_ddl_command (1300004, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
')<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
SELECT worker_apply_shard_ddl_command (1300003, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
')<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
SELECT worker_apply_shard_ddl_command (1300002, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
')<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
SELECT worker_apply_shard_ddl_command (1300001, 'public', '
ALTER TABLE test_table ADD COLUMN x INT;
')localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
')<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s2-rollback:
ROLLBACK;
@ -96,13 +96,13 @@ query query_hostname query_hostport master_query_host_namemaster_query_
INSERT INTO test_table VALUES (100, 100);
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s2-rollback:
ROLLBACK;
@ -150,16 +150,16 @@ query query_hostname query_hostport master_query_host_namemaster_query_
SELECT count(*) FROM test_table;
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT count(*) AS count FROM test_table_1300014 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM test_table_1300014 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
SELECT count(*) AS count FROM test_table_1300013 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
SELECT count(*) AS count FROM test_table_1300012 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
SELECT count(*) AS count FROM test_table_1300011 test_table WHERE truelocalhost xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s2-rollback:
ROLLBACK;
@ -207,13 +207,13 @@ query query_hostname query_hostport master_query_host_namemaster_query_
SELECT count(*) FROM test_table WHERE column1 = 55;
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s3-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name, master_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC;
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column1 OPERATOR(pg_catalog.=) 55)<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s2-rollback:
ROLLBACK;
View File
@ -2,8 +2,8 @@ Parsed test spec with 1 sessions
starting permutation: s1a
step s1a:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
View File
@ -8,19 +8,19 @@ step s2-load-cache:
COPY test_hash_table FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
step s1-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '<host>', xxxxx, '<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -30,19 +30,19 @@ error in steps s2-commit s1-repair-placement: ERROR: target placement must be i
starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
step s1-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), '<host>', xxxxx, '<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
View File
@ -18,13 +18,13 @@ count
1
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -51,8 +51,8 @@ step s2-print-content:
nodeport success result
57637 t 5
57638 t 5
xxxxx t 5
xxxxx t 5
starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
step s1-load-cache:
@ -72,13 +72,13 @@ count
1
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -105,8 +105,8 @@ step s2-print-content:
nodeport success result
57637 t
57638 t
xxxxx t
xxxxx t
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
step s1-load-cache:
@ -123,13 +123,13 @@ count
0
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -156,8 +156,8 @@ step s2-print-content:
nodeport success result
57637 t 10
57638 t 10
xxxxx t 10
xxxxx t 10
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
step s1-load-cache:
@ -174,13 +174,13 @@ count
0
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -207,8 +207,8 @@ step s2-print-content:
nodeport success result
57637 t 5
57638 t 5
xxxxx t 5
xxxxx t 5
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count
step s1-load-cache:
@ -225,13 +225,13 @@ count
0
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -256,10 +256,10 @@ step s2-print-index-count:
nodeport success result
57637 t 1
57637 t 1
57638 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
xxxxx t 1
xxxxx t 1
starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content
step s1-insert:
@ -276,13 +276,13 @@ count
1
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -309,8 +309,8 @@ step s2-print-content:
nodeport success result
57637 t 5
57638 t 5
xxxxx t 5
xxxxx t 5
starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
step s1-insert:
@ -327,13 +327,13 @@ count
1
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -360,8 +360,8 @@ step s2-print-content:
nodeport success result
57637 t
57638 t
xxxxx t
xxxxx t
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
step s1-begin:
@ -375,13 +375,13 @@ count
0
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -408,8 +408,8 @@ step s2-print-content:
nodeport success result
57637 t 10
57638 t 10
xxxxx t 10
xxxxx t 10
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
step s1-begin:
@ -423,13 +423,13 @@ count
0
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -456,8 +456,8 @@ step s2-print-content:
nodeport success result
57637 t 5
57638 t 5
xxxxx t 5
xxxxx t 5
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count
step s1-begin:
@ -471,13 +471,13 @@ count
0
step s2-set-placement-inactive:
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638;
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair-placement:
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -502,7 +502,7 @@ step s2-print-index-count:
nodeport success result
57637 t 1
57637 t 1
57638 t 1
57638 t 1
xxxxx t 1
xxxxx t 1
xxxxx t 1
xxxxx t 1
View File
@ -2,7 +2,7 @@ Parsed test spec with 3 sessions
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -73,7 +73,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -122,7 +122,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -140,7 +140,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
View File
@ -166,7 +166,7 @@ step s1-begin:
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-add-node:
SELECT 1 FROM master_add_inactive_node('localhost', 9999);
SELECT 1 FROM master_add_inactive_node('<host>', 9999);
?column?
@ -191,7 +191,7 @@ step s1-begin:
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-remove-node:
SELECT master_remove_node('localhost', 9999);
SELECT master_remove_node('<host>', 9999);
master_remove_node
View File
@ -3,12 +3,12 @@ Parsed test spec with 2 sessions
starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-begin:
BEGIN;
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -38,10 +38,10 @@ step s1-show-placements:
nodename nodeport
localhost 57637
localhost 57637
localhost 57638
localhost 57638
<host> xxxxx
<host> xxxxx
<host> xxxxx
<host> xxxxx
step s2-select:
SELECT * FROM dist_table;
@ -55,12 +55,12 @@ master_remove_node
starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-begin:
BEGIN;
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -90,10 +90,10 @@ step s1-show-placements:
nodename nodeport
localhost 57637
localhost 57637
localhost 57637
localhost 57637
<host> xxxxx
<host> xxxxx
<host> xxxxx
<host> xxxxx
step s2-select:
SELECT * FROM dist_table;
@ -106,7 +106,7 @@ master_remove_node
starting permutation: s2-begin s2-create-table-1 s1-add-node-2 s2-commit s1-show-placements s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s2-begin:
BEGIN;
@ -120,7 +120,7 @@ create_distributed_table
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -141,10 +141,10 @@ step s1-show-placements:
nodename nodeport
localhost 57637
localhost 57637
localhost 57637
localhost 57637
<host> xxxxx
<host> xxxxx
<host> xxxxx
<host> xxxxx
step s2-select:
SELECT * FROM dist_table;
@ -158,9 +158,9 @@ master_remove_node
starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -169,7 +169,7 @@ step s1-begin:
BEGIN;
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
master_remove_node
@ -199,10 +199,10 @@ step s1-show-placements:
nodename nodeport
localhost 57637
localhost 57637
localhost 57637
localhost 57637
<host> xxxxx
<host> xxxxx
<host> xxxxx
<host> xxxxx
step s2-select:
SELECT * FROM dist_table;
@ -215,9 +215,9 @@ master_remove_node
starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -226,7 +226,7 @@ step s1-begin:
BEGIN;
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
master_remove_node
@ -256,10 +256,10 @@ step s1-show-placements:
nodename nodeport
localhost 57637
localhost 57637
localhost 57638
localhost 57638
<host> xxxxx
<host> xxxxx
<host> xxxxx
<host> xxxxx
step s2-select:
SELECT * FROM dist_table;
@ -273,9 +273,9 @@ master_remove_node
starting permutation: s1-add-node-2 s2-begin s2-create-table-1 s1-remove-node-2 s2-commit s1-show-placements s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -293,7 +293,7 @@ create_distributed_table
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -312,10 +312,10 @@ step s1-show-placements:
nodename nodeport
localhost 57637
localhost 57637
localhost 57638
localhost 57638
<host> xxxxx
<host> xxxxx
<host> xxxxx
<host> xxxxx
step s2-select:
SELECT * FROM dist_table;
@ -329,9 +329,9 @@ master_remove_node
starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-2 s1-commit s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -340,7 +340,7 @@ step s1-begin:
BEGIN;
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
master_remove_node
@ -367,9 +367,9 @@ master_remove_node
starting permutation: s1-add-node-2 s2-begin s2-create-table-2 s1-remove-node-2 s2-commit s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -387,7 +387,7 @@ create_distributed_table
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -407,9 +407,9 @@ master_remove_node
starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-append-table s1-commit s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -418,7 +418,7 @@ step s1-begin:
BEGIN;
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
master_remove_node
@ -451,9 +451,9 @@ master_remove_node
starting permutation: s1-add-node-2 s2-begin s2-create-append-table s1-remove-node-2 s2-commit s2-select
node_name node_port
localhost 57637
<host> xxxxx
step s1-add-node-2:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -474,7 +474,7 @@ create_distributed_table
1
step s1-remove-node-2:
SELECT * FROM master_remove_node('localhost', 57638);
SELECT * FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
View File
@ -16,8 +16,8 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -36,8 +36,8 @@ error in steps s1-commit s2-ddl-create-index-concurrently: ERROR: relation "ddl
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -57,13 +57,13 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -83,13 +83,13 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -109,13 +109,13 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -133,13 +133,13 @@ step s2-ddl-create-index-concurrently: <... completed>
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -159,8 +159,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -180,8 +180,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -201,13 +201,13 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -225,13 +225,13 @@ step s2-ddl-create-index-concurrently: <... completed>
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -251,8 +251,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -273,8 +273,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -296,8 +296,8 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -317,8 +317,8 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -343,8 +343,8 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,4)
(localhost,57638,t,4)
(<host>,xxxxx,t,4)
(<host>,xxxxx,t,4)
restore_isolation_tester_func
@ -367,8 +367,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -388,8 +388,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -414,8 +414,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -438,8 +438,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -459,8 +459,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -485,8 +485,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -508,8 +508,8 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -529,8 +529,8 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -555,8 +555,8 @@ step s2-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,4)
(localhost,57638,t,4)
(<host>,xxxxx,t,4)
(<host>,xxxxx,t,4)
restore_isolation_tester_func
@ -576,8 +576,8 @@ step s1-commit: COMMIT;
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -595,8 +595,8 @@ step s2-ddl-create-index-concurrently: <... completed>
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -619,8 +619,8 @@ step s2-ddl-create-index-concurrently: <... completed>
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%''');
run_command_on_workers
(localhost,57637,t,4)
(localhost,57638,t,4)
(<host>,xxxxx,t,4)
(<host>,xxxxx,t,4)
restore_isolation_tester_func
@ -642,8 +642,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -663,8 +663,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -689,8 +689,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -712,8 +712,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -733,8 +733,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -759,8 +759,8 @@ step s2-commit: COMMIT;
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func

View File

@ -77,8 +77,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -103,8 +103,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
restore_isolation_tester_func
@ -126,8 +126,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -151,8 +151,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -177,8 +177,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -202,8 +202,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -313,8 +313,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
restore_isolation_tester_func
@ -339,8 +339,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
restore_isolation_tester_func
@ -364,8 +364,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func
@ -390,8 +390,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -415,8 +415,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
restore_isolation_tester_func

View File

@ -2,7 +2,7 @@ Parsed test spec with 3 sessions
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -81,7 +81,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -99,7 +99,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -156,7 +156,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -174,7 +174,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -236,7 +236,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -254,7 +254,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -314,7 +314,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -332,7 +332,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -393,7 +393,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -411,7 +411,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -469,7 +469,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select-for-udpate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -487,7 +487,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node

View File

@ -86,8 +86,8 @@ step s1-verify-current-xact-is-on-worker:
nodeport xact_exists
57637 t
57638 t
xxxxx t
xxxxx t
step s1-commit:
COMMIT;

View File

@ -1,11 +1,11 @@
Parsed test spec with 2 sessions
starting permutation: s2-invalidate-57637 s1-begin s1-insertone s2-repair s1-commit
starting permutation: s2-invalidate-xxxxx s1-begin s1-insertone s2-repair s1-commit
master_create_worker_shards
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-begin:
BEGIN;
@ -14,7 +14,7 @@ step s1-insertone:
INSERT INTO test_dml_vs_repair VALUES(1, 1);
step s2-repair:
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '<host>', xxxxx, '<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -24,15 +24,15 @@ master_copy_shard_placement
starting permutation: s1-insertone s2-invalidate-57637 s1-begin s1-insertall s2-repair s1-commit
starting permutation: s1-insertone s2-invalidate-xxxxx s1-begin s1-insertall s2-repair s1-commit
master_create_worker_shards
step s1-insertone:
INSERT INTO test_dml_vs_repair VALUES(1, 1);
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-begin:
BEGIN;
@ -41,7 +41,7 @@ step s1-insertall:
INSERT INTO test_dml_vs_repair SELECT test_id, data+1 FROM test_dml_vs_repair;
step s2-repair:
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '<host>', xxxxx, '<host>', xxxxx);
<waiting ...>
step s1-commit:
COMMIT;
@ -51,18 +51,18 @@ master_copy_shard_placement
starting permutation: s2-invalidate-57637 s2-begin s2-repair s1-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display
starting permutation: s2-invalidate-xxxxx s2-begin s2-repair s1-insertone s2-commit s2-invalidate-xxxxx s1-display s2-invalidate-xxxxx s2-revalidate-xxxxx s1-display
master_create_worker_shards
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s2-begin:
BEGIN;
step s2-repair:
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -74,8 +74,8 @@ step s2-commit:
COMMIT;
step s1-insertone: <... completed>
step s2-invalidate-57638:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-display:
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
@ -83,11 +83,11 @@ step s1-display:
test_id data
1 1
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s2-revalidate-57638:
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
step s2-revalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-display:
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
@ -96,12 +96,12 @@ test_id data
1 1
starting permutation: s2-invalidate-57637 s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display
starting permutation: s2-invalidate-xxxxx s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-xxxxx s1-display s2-invalidate-xxxxx s2-revalidate-xxxxx s1-display
master_create_worker_shards
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-prepared-insertone:
EXECUTE insertone;
@ -110,7 +110,7 @@ step s2-begin:
BEGIN;
step s2-repair:
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -122,8 +122,8 @@ step s2-commit:
COMMIT;
step s1-prepared-insertone: <... completed>
step s2-invalidate-57638:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-display:
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
@ -132,11 +132,11 @@ test_id data
1 1
1 1
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s2-revalidate-57638:
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
step s2-revalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-display:
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
@ -146,12 +146,12 @@ test_id data
1 1
1 1
starting permutation: s2-invalidate-57637 s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display
starting permutation: s2-invalidate-xxxxx s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-xxxxx s1-display s2-invalidate-xxxxx s2-revalidate-xxxxx s1-display
master_create_worker_shards
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-insertone:
INSERT INTO test_dml_vs_repair VALUES(1, 1);
@ -163,7 +163,7 @@ step s2-begin:
BEGIN;
step s2-repair:
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637);
SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), '<host>', xxxxx, '<host>', xxxxx);
master_copy_shard_placement
@ -175,8 +175,8 @@ step s2-commit:
COMMIT;
step s1-prepared-insertall: <... completed>
step s2-invalidate-57638:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-display:
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;
@ -187,11 +187,11 @@ test_id data
1 2
1 2
1 3
step s2-invalidate-57637:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637;
step s2-invalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s2-revalidate-57638:
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638;
step s2-revalidate-xxxxx:
UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = xxxxx;
step s1-display:
SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id;

View File

@ -2,7 +2,7 @@ Parsed test spec with 3 sessions
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -77,7 +77,7 @@ step s1-index:
CREATE INDEX dist_table_index ON dist_table (id);
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -115,7 +115,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -133,7 +133,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -181,7 +181,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node

View File

@ -37,8 +37,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
restore_isolation_tester_func
@ -62,8 +62,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
restore_isolation_tester_func
@ -84,8 +84,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
restore_isolation_tester_func
@ -108,8 +108,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -133,8 +133,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -157,8 +157,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -220,8 +220,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
restore_isolation_tester_func
@ -244,8 +244,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
restore_isolation_tester_func
@ -267,8 +267,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -291,8 +291,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func
@ -314,8 +314,8 @@ ERROR: relation "drop_hash" does not exist
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
restore_isolation_tester_func

View File

@ -5,7 +5,7 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -32,16 +32,16 @@ extname extversion nspname
seg 1.1 public
run_command_on_workers
(localhost,57637,t,seg)
(localhost,57638,t,seg)
(<host>,xxxxx,t,seg)
(<host>,xxxxx,t,seg)
run_command_on_workers
(localhost,57637,t,1.1)
(localhost,57638,t,1.1)
(<host>,xxxxx,t,1.1)
(<host>,xxxxx,t,1.1)
run_command_on_workers
(localhost,57637,t,public)
(localhost,57638,t,public)
(<host>,xxxxx,t,public)
(<host>,xxxxx,t,public)
master_remove_node
@ -52,7 +52,7 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -79,16 +79,16 @@ extname extversion nspname
seg 1.2 public
run_command_on_workers
(localhost,57637,t,seg)
(localhost,57638,t,seg)
(<host>,xxxxx,t,seg)
(<host>,xxxxx,t,seg)
run_command_on_workers
(localhost,57637,t,1.2)
(localhost,57638,t,1.2)
(<host>,xxxxx,t,1.2)
(<host>,xxxxx,t,1.2)
run_command_on_workers
(localhost,57637,t,public)
(localhost,57638,t,public)
(<host>,xxxxx,t,public)
(<host>,xxxxx,t,public)
master_remove_node
@ -96,7 +96,7 @@ master_remove_node
starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-drop-extension s1-commit s1-print
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -105,7 +105,7 @@ step s1-begin:
BEGIN;
step s1-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
?column?
@ -131,13 +131,13 @@ extname extversion nspname
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
master_remove_node
@ -147,7 +147,7 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -174,16 +174,16 @@ extname extversion nspname
seg 1.3 schema1
run_command_on_workers
(localhost,57637,t,seg)
(localhost,57638,t,seg)
(<host>,xxxxx,t,seg)
(<host>,xxxxx,t,seg)
run_command_on_workers
(localhost,57637,t,1.2)
(localhost,57638,t,1.3)
(<host>,xxxxx,t,1.2)
(<host>,xxxxx,t,1.3)
run_command_on_workers
(localhost,57637,t,public)
(localhost,57638,t,schema1)
(<host>,xxxxx,t,public)
(<host>,xxxxx,t,schema1)
master_remove_node
@ -194,7 +194,7 @@ step s1-begin:
BEGIN;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -220,16 +220,16 @@ extname extversion nspname
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
master_remove_node
@ -237,7 +237,7 @@ master_remove_node
starting permutation: s1-add-node-1 s1-create-extension-with-schema2 s1-begin s1-remove-node-1 s2-alter-extension-set-schema3 s1-commit s1-print
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -249,7 +249,7 @@ step s1-begin:
BEGIN;
step s1-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
?column?
@ -276,20 +276,20 @@ extname extversion nspname
seg 1.3 schema3
run_command_on_workers
(localhost,57638,t,seg)
(<host>,xxxxx,t,seg)
run_command_on_workers
(localhost,57638,t,1.3)
(<host>,xxxxx,t,1.3)
run_command_on_workers
(localhost,57638,t,schema3)
(<host>,xxxxx,t,schema3)
master_remove_node
starting permutation: s1-add-node-1 s2-drop-extension s1-begin s1-remove-node-1 s2-create-extension-with-schema1 s1-commit s1-print
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -301,7 +301,7 @@ step s1-begin:
BEGIN;
step s1-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
?column?
@ -328,20 +328,20 @@ extname extversion nspname
seg 1.3 schema1
run_command_on_workers
(localhost,57638,t,seg)
(<host>,xxxxx,t,seg)
run_command_on_workers
(localhost,57638,t,1.3)
(<host>,xxxxx,t,1.3)
run_command_on_workers
(localhost,57638,t,schema1)
(<host>,xxxxx,t,schema1)
master_remove_node
starting permutation: s2-add-node-1 s2-drop-extension s2-remove-node-1 s2-begin s2-create-extension-version-11 s1-add-node-1 s2-commit s1-print
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -350,7 +350,7 @@ step s2-drop-extension:
drop extension seg;
step s2-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
?column?
@ -362,7 +362,7 @@ step s2-create-extension-version-11:
CREATE extension seg VERSION "1.1";
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -385,16 +385,16 @@ extname extversion nspname
seg 1.1 public
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
master_remove_node
@ -405,7 +405,7 @@ step s2-drop-extension:
drop extension seg;
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -414,7 +414,7 @@ step s2-create-extension-version-11:
CREATE extension seg VERSION "1.1";
step s2-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
?column?
@ -426,7 +426,7 @@ step s2-alter-extension-update-to-version-12:
ALTER extension seg update to "1.2";
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -450,16 +450,16 @@ extname extversion nspname
seg 1.2 public
run_command_on_workers
(localhost,57637,t,seg)
(localhost,57638,t,seg)
(<host>,xxxxx,t,seg)
(<host>,xxxxx,t,seg)
run_command_on_workers
(localhost,57637,t,1.1)
(localhost,57638,t,1.2)
(<host>,xxxxx,t,1.1)
(<host>,xxxxx,t,1.2)
run_command_on_workers
(localhost,57637,t,public)
(localhost,57638,t,public)
(<host>,xxxxx,t,public)
(<host>,xxxxx,t,public)
master_remove_node
@ -467,7 +467,7 @@ master_remove_node
starting permutation: s2-add-node-1 s2-begin s2-drop-extension s1-remove-node-1 s2-commit s1-print
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -479,7 +479,7 @@ step s2-drop-extension:
drop extension seg;
step s1-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -502,13 +502,13 @@ extname extversion nspname
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
master_remove_node
@ -521,7 +521,7 @@ step s2-create-extension-with-schema1:
CREATE extension seg with schema schema1;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -544,16 +544,16 @@ extname extversion nspname
seg 1.3 schema1
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
master_remove_node
@ -564,7 +564,7 @@ step s2-drop-extension:
drop extension seg;
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -579,7 +579,7 @@ step s2-alter-extension-version-13:
ALTER extension seg update to "1.3";
step s1-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -603,13 +603,13 @@ extname extversion nspname
seg 1.3 schema2
run_command_on_workers
(localhost,57638,t,seg)
(<host>,xxxxx,t,seg)
run_command_on_workers
(localhost,57638,t,1.3)
(<host>,xxxxx,t,1.3)
run_command_on_workers
(localhost,57638,t,schema2)
(<host>,xxxxx,t,schema2)
master_remove_node
@ -619,7 +619,7 @@ step s2-drop-extension:
drop extension seg;
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -631,7 +631,7 @@ step s2-create-extension-version-11:
CREATE extension seg VERSION "1.1";
step s1-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
?column?
@ -654,13 +654,13 @@ extname extversion nspname
seg 1.1 public
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
master_remove_node
@ -670,7 +670,7 @@ step s2-drop-extension:
drop extension seg;
step s2-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
?column?
@ -679,7 +679,7 @@ step s2-create-extension-version-11:
CREATE extension seg VERSION "1.1";
step s2-remove-node-1:
SELECT 1 FROM master_remove_node('localhost', 57637);
SELECT 1 FROM master_remove_node('<host>', xxxxx);
?column?
@ -691,7 +691,7 @@ step s2-drop-extension:
drop extension seg;
step s1-add-node-1:
SELECT 1 FROM master_add_node('localhost', 57637);
SELECT 1 FROM master_add_node('<host>', xxxxx);
<waiting ...>
step s2-commit:
COMMIT;
@ -714,16 +714,16 @@ extname extversion nspname
run_command_on_workers
(localhost,57637,t,seg)
(localhost,57638,t,"")
(<host>,xxxxx,t,seg)
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,1.3)
(localhost,57638,t,"")
(<host>,xxxxx,t,1.3)
(<host>,xxxxx,t,"")
run_command_on_workers
(localhost,57637,t,schema2)
(localhost,57638,t,"")
(<host>,xxxxx,t,schema2)
(<host>,xxxxx,t,"")
master_remove_node

View File

@ -3,8 +3,8 @@ Parsed test spec with 3 sessions
starting permutation: s1-grant s1-begin-insert s2-begin-insert s3-as-admin s3-as-user-1 s3-as-readonly s3-as-monitor s1-commit s2-commit
run_command_on_workers
(localhost,57637,t,"GRANT ROLE")
(localhost,57638,t,"GRANT ROLE")
(<host>,xxxxx,t,"GRANT ROLE")
(<host>,xxxxx,t,"GRANT ROLE")
step s1-grant:
GRANT ALL ON test_table TO test_user_1;
SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_1');
@ -82,5 +82,5 @@ step s2-commit:
run_command_on_workers
(localhost,57637,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")
(localhost,57638,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")
(<host>,xxxxx,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")
(<host>,xxxxx,f,"ERROR: role ""test_user_1"" cannot be dropped because some objects depend on it")

View File

@ -8,7 +8,7 @@ step s1-update-ref-table-from-coordinator:
UPDATE ref_table SET value_1 = 15;
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -29,7 +29,7 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1
UPDATE ref_table SET value_1 = 15;
localhost coordinator_host 57638 57636
<host> coordinator_host 57638 xxxxx
step s1-commit:
COMMIT;
@ -55,7 +55,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -73,7 +73,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -92,7 +92,7 @@ step s3-select-distributed-waiting-queries:
blocked_statement current_statement_in_blocking_process waiting_node_name blocking_node_name waiting_node_port blocking_node_port
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 localhost localhost 57638 57637
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 localhost <host> xxxxx xxxxx
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@ -127,7 +127,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s2-start-session-level-connection s2-begin-on-worker s2-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -145,7 +145,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -164,7 +164,7 @@ step s3-select-distributed-waiting-queries:
blocked_statement current_statement_in_blocking_process waiting_node_name blocking_node_name waiting_node_port blocking_node_port
UPDATE tt1 SET value_1 = 5 UPDATE tt1 SET value_1 = 4 localhost localhost 57638 57637
UPDATE tt1 SET value_1 = 5 UPDATE tt1 SET value_1 = 4 localhost <host> xxxxx xxxxx
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@ -199,7 +199,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -217,7 +217,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -236,7 +236,7 @@ step s3-select-distributed-waiting-queries:
blocked_statement current_statement_in_blocking_process waiting_node_name blocking_node_name waiting_node_port blocking_node_port
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 DELETE FROM ref_table WHERE user_id = 1 localhost localhost 57638 57637
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 DELETE FROM ref_table WHERE user_id = 1 localhost <host> xxxxx xxxxx
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@ -271,7 +271,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -289,7 +289,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -308,7 +308,7 @@ step s3-select-distributed-waiting-queries:
blocked_statement current_statement_in_blocking_process waiting_node_name blocking_node_name waiting_node_port blocking_node_port
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 INSERT INTO ref_table VALUES(8,81),(9,91) localhost localhost 57638 57637
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 INSERT INTO ref_table VALUES(8,81),(9,91) <host> <host> xxxxx xxxxx
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@ -343,7 +343,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -361,7 +361,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -413,7 +413,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -431,7 +431,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -450,7 +450,7 @@ step s3-select-distributed-waiting-queries:
blocked_statement current_statement_in_blocking_process waiting_node_name blocking_node_name waiting_node_port blocking_node_port
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSV localhost localhost 57638 57637
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 COPY ref_table FROM PROGRAM 'echo 10, 101 && echo 11, 111' WITH CSV localhost <host> xxxxx xxxxx
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@ -485,7 +485,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -503,7 +503,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -555,7 +555,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -573,7 +573,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -625,7 +625,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -643,7 +643,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -662,7 +662,7 @@ step s3-select-distributed-waiting-queries:
blocked_statement current_statement_in_blocking_process waiting_node_name blocking_node_name waiting_node_port blocking_node_port
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 SELECT * FROM ref_table FOR UPDATE localhost localhost 57638 57637
UPDATE ref_table SET value_1 = 12 WHERE user_id = 1 SELECT * FROM ref_table FOR UPDATE localhost <host> xxxxx xxxxx
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@ -697,7 +697,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s3-select-distributed-waiting-queries s2-commit-worker s1-commit s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -727,7 +727,7 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_
ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id);
INSERT INTO ref_table VALUES(8,81),(9,91) coordinator_host localhost 57636 57638
INSERT INTO ref_table VALUES(8,81),(9,91) coordinator_host localhost xxxxx xxxxx
step s2-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');
@ -767,7 +767,7 @@ blocked_statementcurrent_statement_in_blocking_processwaiting_node_nameblocking_
UPDATE tt1 SET value_1 = 4;
UPDATE tt1 SET value_1 = 4;
coordinator_host coordinator_host 57636 57636
coordinator_host coordinator_host 57636 xxxxx
step s1-commit:
COMMIT;
@ -778,7 +778,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s4-start-session-level-connection s4-begin-on-worker s4-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s4-commit-worker s1-stop-connection s4-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -796,7 +796,7 @@ run_commands_on_session_level_connection_to_node
step s4-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -815,7 +815,7 @@ step s3-select-distributed-waiting-queries:
blocked_statement current_statement_in_blocking_process waiting_node_name blocking_node_name waiting_node_port blocking_node_port
UPDATE tt1 SET value_1 = 5 UPDATE tt1 SET value_1 = 4 localhost localhost 57637 57637
UPDATE tt1 SET value_1 = 5 UPDATE tt1 SET value_1 = 4 localhost <host> xxxxx xxxxx
step s1-commit-worker:
SELECT run_commands_on_session_level_connection_to_node('COMMIT');

View File

@ -177,8 +177,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -198,8 +198,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -218,8 +218,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -238,8 +238,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -259,8 +259,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -279,8 +279,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
@ -611,8 +611,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -632,8 +632,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -653,8 +653,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -674,8 +674,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -694,8 +694,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table


@ -119,8 +119,8 @@ count
step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-insert-select s2-ddl-drop-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted
create_distributed_table
@ -145,8 +145,8 @@ count
step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-inserted s1-commit s1-select-count s1-show-indexes-inserted
create_distributed_table
@ -170,8 +170,8 @@ count
step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted
create_distributed_table
@ -195,8 +195,8 @@ count
step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-insert-select s2-ddl-drop-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted
create_distributed_table
@ -221,8 +221,8 @@ count
step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted s1-show-columns-inserted
create_distributed_table
@ -246,13 +246,13 @@ count
step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-inserted s1-commit s1-select-count
create_distributed_table
@ -440,8 +440,8 @@ count
step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-insert-select s2-ddl-drop-index-on-selected s1-commit s1-select-count s1-show-indexes-selected
create_distributed_table
@ -466,8 +466,8 @@ count
step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-selected s1-commit s1-select-count s1-show-indexes-selected
create_distributed_table
@ -491,8 +491,8 @@ count
step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-selected s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
@ -516,8 +516,8 @@ count
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-insert-select s2-ddl-drop-column-on-selected s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
@ -542,8 +542,8 @@ count
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-selected s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
@ -567,8 +567,8 @@ count
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-selected s1-commit s1-select-count
create_distributed_table
@ -759,8 +759,8 @@ count
step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-ddl-drop-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted
create_distributed_table
@ -785,8 +785,8 @@ count
step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted
create_distributed_table
@ -810,8 +810,8 @@ count
step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-ddl-drop-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted
create_distributed_table
@ -836,8 +836,8 @@ count
step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted
create_distributed_table
@ -861,8 +861,8 @@ count
step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size-on-inserted s2-insert-select s1-commit s1-select-count
create_distributed_table
@ -1052,8 +1052,8 @@ count
step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-ddl-drop-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected
create_distributed_table
@ -1078,8 +1078,8 @@ count
step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
@ -1104,8 +1104,8 @@ count
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-ddl-drop-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
@ -1130,8 +1130,8 @@ count
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected
create_distributed_table
@ -1155,8 +1155,8 @@ count
step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size-on-selected s2-insert-select s1-commit s1-select-count
create_distributed_table


@ -2,7 +2,7 @@ Parsed test spec with 3 sessions
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -73,7 +73,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -91,7 +91,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -144,7 +144,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -162,7 +162,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -215,7 +215,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -233,7 +233,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -286,7 +286,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -304,7 +304,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -357,7 +357,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -375,7 +375,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -428,7 +428,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -446,7 +446,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -499,7 +499,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -517,7 +517,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -570,7 +570,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -588,7 +588,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -641,7 +641,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -659,7 +659,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -712,7 +712,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -767,7 +767,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -822,7 +822,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -840,7 +840,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -893,7 +893,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -911,7 +911,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node


@ -143,8 +143,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -164,8 +164,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-insert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -184,8 +184,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-begin s1-insert s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -204,8 +204,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -225,8 +225,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-insert s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -245,8 +245,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-insert s2-table-size s1-commit s1-select-count
create_distributed_table
@ -387,8 +387,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -408,8 +408,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -428,8 +428,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -449,8 +449,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -469,8 +469,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-insert s1-commit s1-select-count
create_distributed_table
@ -610,8 +610,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert-multi-row s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -631,8 +631,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -651,8 +651,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -671,8 +671,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert-multi-row s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -692,8 +692,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -712,8 +712,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-table-size s1-commit s1-select-count
create_distributed_table
@ -854,8 +854,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -875,8 +875,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -895,8 +895,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -916,8 +916,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -936,8 +936,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-insert-multi-row s1-commit s1-select-count
create_distributed_table


@ -2,7 +2,7 @@ Parsed test spec with 3 sessions
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -73,7 +73,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -91,7 +91,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -144,7 +144,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -162,7 +162,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -215,7 +215,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -233,7 +233,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -286,7 +286,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -304,7 +304,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -357,7 +357,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -375,7 +375,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -428,7 +428,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -446,7 +446,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -499,7 +499,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-update-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -517,7 +517,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -570,7 +570,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -588,7 +588,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -641,7 +641,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -659,7 +659,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -713,7 +713,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -731,7 +731,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node


@ -9,7 +9,7 @@ step s2-begin:
step s1-master_append_table_to_shard:
SELECT
master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636)
master_append_table_to_shard(shardid, 'table_to_be_appended', '<host>', xxxxx)
FROM
pg_dist_shard
WHERE
@ -20,7 +20,7 @@ master_append_table_to_shard
0.0426667
step s2-master_append_table_to_shard:
SELECT
master_append_table_to_shard(shardid, 'table_to_be_appended', 'localhost', 57636)
master_append_table_to_shard(shardid, 'table_to_be_appended', '<host>', xxxxx)
FROM
pg_dist_shard
WHERE


@ -9,10 +9,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1:
-- update a specific node by address
SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
SELECT master_update_node(nodeid, '<host>', nodeport + 10)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
WHERE nodename = '<host>'
AND nodeport = xxxxx;
<waiting ...>
step s1-abort: ABORT;
step s2-update-node-1: <... completed>
@ -34,10 +34,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1-force:
-- update a specific node by address (force)
SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
SELECT master_update_node(nodeid, '<host>', nodeport + 10, force => true, lock_cooldown => 100)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
WHERE nodename = '<host>'
AND nodeport = xxxxx;
<waiting ...>
step s2-update-node-1-force: <... completed>
master_update_node


@ -9,10 +9,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1:
-- update a specific node by address
SELECT master_update_node(nodeid, 'localhost', nodeport + 10)
SELECT master_update_node(nodeid, '<host>', nodeport + 10)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
WHERE nodename = '<host>'
AND nodeport = xxxxx;
<waiting ...>
step s1-abort: ABORT;
step s2-update-node-1: <... completed>
@ -34,10 +34,10 @@ step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100);
step s2-begin: BEGIN;
step s2-update-node-1-force:
-- update a specific node by address (force)
SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100)
SELECT master_update_node(nodeid, '<host>', nodeport + 10, force => true, lock_cooldown => 100)
FROM pg_dist_node
WHERE nodename = 'localhost'
AND nodeport = 57637;
WHERE nodename = '<host>'
AND nodeport = xxxxx;
<waiting ...>
step s2-update-node-1-force: <... completed>
master_update_node


@ -177,8 +177,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -198,8 +198,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -218,8 +218,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
@ -455,8 +455,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -476,8 +476,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -496,8 +496,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table


@ -177,8 +177,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -198,8 +198,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -218,8 +218,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -238,8 +238,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -259,8 +259,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -279,8 +279,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
@ -532,8 +532,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
(localhost,57637,t,2)
(localhost,57638,t,2)
(<host>,xxxxx,t,2)
(<host>,xxxxx,t,2)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -553,8 +553,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -574,8 +574,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -595,8 +595,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -615,8 +615,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table


@ -2,7 +2,7 @@ Parsed test spec with 2 sessions
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -20,15 +20,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -37,7 +37,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"}
<host> xxxxx t {"(ExclusiveLock,1)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -46,8 +46,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -56,7 +56,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -75,7 +75,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -93,15 +93,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -110,7 +110,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(ExclusiveLock,1)","(ShareLock,1)"}
<host> xxxxx t {"(ExclusiveLock,1)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -119,8 +119,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -129,7 +129,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -148,7 +148,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -166,15 +166,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -183,7 +183,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"}
<host> xxxxx t {"(ExclusiveLock,2)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -192,8 +192,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -202,7 +202,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -221,7 +221,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -239,15 +239,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -256,7 +256,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(ExclusiveLock,2)","(ShareLock,1)"}
<host> xxxxx t {"(ExclusiveLock,2)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -265,8 +265,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -275,7 +275,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -294,7 +294,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -312,15 +312,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -329,7 +329,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"}
<host> xxxxx t {"(ExclusiveLock,3)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -338,8 +338,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -348,7 +348,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -367,7 +367,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -385,15 +385,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -402,7 +402,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(ExclusiveLock,3)","(ShareLock,1)"}
<host> xxxxx t {"(ExclusiveLock,3)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -411,8 +411,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -421,7 +421,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -440,7 +440,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -458,15 +458,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -475,7 +475,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(RowExclusiveLock,1)","(ShareLock,1)"}
<host> xxxxx t {"(RowExclusiveLock,1)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -484,8 +484,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -494,7 +494,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -513,7 +513,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -531,15 +531,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -548,7 +548,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(RowExclusiveLock,2)","(ShareLock,1)"}
<host> xxxxx t {"(RowExclusiveLock,2)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -557,8 +557,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -567,7 +567,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();
@ -586,7 +586,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -604,15 +604,15 @@ run_commands_on_session_level_connection_to_node
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -621,7 +621,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t {"(RowExclusiveLock,3)","(ShareLock,1)"}
<host> xxxxx t {"(RowExclusiveLock,3)","(ShareLock,1)"}
step s2-rollback-worker:
SELECT run_commands_on_session_level_connection_to_node('ROLLBACK');
@ -630,8 +630,8 @@ run_commands_on_session_level_connection_to_node
step s1-view-locks:
SELECT * FROM master_run_on_worker(
ARRAY['localhost']::text[],
ARRAY[57637]::int[],
ARRAY['<host>']::text[],
ARRAY[xxxxx]::int[],
ARRAY[$$
SELECT array_agg(ROW(t.mode, t.count) ORDER BY t.mode) FROM
(SELECT mode, count(*) count FROM pg_locks
@ -640,7 +640,7 @@ step s1-view-locks:
node_name node_port success result
localhost 57637 t
<host> xxxxx t
step s1-stop-connection:
SELECT stop_session_level_connection_to_node();


@ -5,7 +5,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -23,7 +23,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -74,7 +74,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -92,7 +92,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -149,7 +149,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -167,7 +167,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -217,7 +217,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -235,7 +235,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -286,7 +286,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -304,7 +304,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -361,7 +361,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -379,7 +379,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -427,7 +427,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -445,7 +445,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -496,7 +496,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node


@ -8,7 +8,7 @@ step s1-add-primary-key:
ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id);
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -26,7 +26,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -82,7 +82,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -100,7 +100,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -160,7 +160,7 @@ step s1-add-primary-key:
ALTER TABLE ref_table ADD CONSTRAINT pri_key PRIMARY KEY (id);
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -178,7 +178,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -230,7 +230,7 @@ create_reference_table
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -248,7 +248,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node


@ -180,8 +180,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -201,8 +201,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -221,8 +221,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_reference_table
@ -241,8 +241,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_reference_table
@ -262,8 +262,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_reference_table
@ -282,8 +282,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_reference_table
@ -504,8 +504,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -525,8 +525,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_reference_table
@ -546,8 +546,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_reference_table
@ -567,8 +567,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_reference_table
@ -587,8 +587,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_reference_table


@ -2,7 +2,7 @@ Parsed test spec with 2 sessions
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -20,7 +20,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -68,7 +68,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -86,7 +86,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -134,7 +134,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -152,7 +152,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -200,7 +200,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -218,7 +218,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -265,7 +265,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -283,7 +283,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -331,7 +331,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -349,7 +349,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -396,7 +396,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -414,7 +414,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -461,7 +461,7 @@ restore_isolation_tester_func
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57637);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -479,7 +479,7 @@ run_commands_on_session_level_connection_to_node
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -527,7 +527,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node
@ -572,7 +572,7 @@ restore_isolation_tester_func
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-select-from-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection
step s2-start-session-level-connection:
SELECT start_session_level_connection_to_node('localhost', 57638);
SELECT start_session_level_connection_to_node('<host>', xxxxx);
start_session_level_connection_to_node


@ -79,10 +79,10 @@ query query_hostname query_hostport master_query_host_namemaster_query_
update ref_table set a = a + 1;
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead <user> regression
update ref_table set a = a + 1;
localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s2-view-worker:
SELECT query, query_hostname, query_hostport, master_query_host_name,
master_query_host_port, state, wait_event_type, wait_event, usename, datname
@ -94,10 +94,10 @@ step s2-view-worker:
query query_hostname query_hostport master_query_host_namemaster_query_host_portstate wait_event_typewait_event usename datname
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57638 coordinator_host57636 idle in transactionClient ClientRead postgres regression
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57637 coordinator_host57636 idle in transactionClient ClientRead postgres regression
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)localhost 57636 coordinator_host57636 idle in transactionClient ClientRead postgres regression
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)coordinator_host57636 coordinator_host57636 idle in transactionClient ClientRead <user> regression
UPDATE public.ref_table_1400163 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)<host> xxxxx coordinator_host57636 idle in transactionClient ClientRead <user> regression
step s2-end:
END;


@ -367,8 +367,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -391,8 +391,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -413,8 +413,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -436,8 +436,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -460,8 +460,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -483,8 +483,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count
create_distributed_table
@ -693,8 +693,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -717,8 +717,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -740,8 +740,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -764,8 +764,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -787,8 +787,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count
create_distributed_table
@ -1028,8 +1028,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -1056,8 +1056,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -1082,8 +1082,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1109,8 +1109,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1137,8 +1137,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1164,8 +1164,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count
create_distributed_table
@ -1370,8 +1370,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -1398,8 +1398,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1425,8 +1425,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1453,8 +1453,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1480,8 +1480,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count
create_distributed_table
@ -1718,8 +1718,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-task-tracker-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -1749,8 +1749,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -1778,8 +1778,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1808,8 +1808,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-task-tracker-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1839,8 +1839,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1869,8 +1869,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-table-size s1-commit s1-select-count
create_distributed_table
@ -2105,8 +2105,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,1)
(localhost,57638,t,1)
(<host>,xxxxx,t,1)
(<host>,xxxxx,t,1)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -2136,8 +2136,8 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%''');
run_command_on_workers
(localhost,57637,t,0)
(localhost,57638,t,0)
(<host>,xxxxx,t,0)
(<host>,xxxxx,t,0)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -2166,8 +2166,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -2197,8 +2197,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,"")
(localhost,57638,t,"")
(<host>,xxxxx,t,"")
(<host>,xxxxx,t,"")
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -2227,8 +2227,8 @@ count
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(<host>,xxxxx,t,new_column)
(<host>,xxxxx,t,new_column)
starting permutation: s1-initialize s1-begin s1-table-size s2-task-tracker-select s1-commit s1-select-count
create_distributed_table

Some files were not shown because too many files have changed in this diff.