--
-- MULTI CITUS TOOLS
--
-- tests UDFs created for citus tools
--
CREATE SCHEMA tools;
SET SEARCH_PATH TO 'tools';
SET citus.next_shard_id TO 1240000;
-- test with invalid port, prevent OS dependent warning from being displayed
SET client_min_messages to ERROR;
SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[],
ARRAY['select count(*) from pg_dist_shard']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 666 | f | failed to connect to localhost:xxxxx
(1 row)
SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[],
ARRAY['select count(*) from pg_dist_shard']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 666 | f | failed to connect to localhost:xxxxx
(1 row)
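-- Note: master_run_on_worker takes parallel arrays of node names, ports, and
-- commands, plus a boolean selecting parallel execution, and returns one
-- (node_name, node_port, success, result) row per command. Minimal sketch,
-- illustrative only and not executed by this test (assumes a reachable worker):
--   SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[],
--                                      ARRAY[57637]::int[],
--                                      ARRAY['select 1']::text[], false);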
RESET client_min_messages;
-- store worker node name and port
SELECT quote_literal(node_name) as node_name, node_port as node_port
FROM master_get_active_worker_nodes()
ORDER BY node_port
LIMIT 1 \gset
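-- Note: \gset stores each column of the single-row result into same-named
-- psql variables; quote_literal() pre-quotes the node name so that :node_name
-- expands to a valid string literal below. Illustrative sketch, not part of
-- this test:
--   SELECT quote_literal('localhost') AS host, 5432 AS port \gset
--   \echo :host :port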
-- connect to the first worker and ask for shard count, should return 0
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from pg_dist_shard']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 0
(1 row)
-- connect to the first worker and ask for shards, should fail with
-- expecting a single column error
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select * from pg_dist_shard']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | expected a single column in query target
(1 row)
-- query result may only contain a single row
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select a from generate_series(1,2) a']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | expected a single row in query result
(1 row)
-- send multiple queries
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['select a from generate_series(1,1) a',
'select a from generate_series(2,2) a']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 1
localhost | 57637 | t | 2
(2 rows)
-- send multiple queries, one fails
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['select a from generate_series(1,1) a',
'select a from generate_series(1,2) a']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 1
localhost | 57637 | f | expected a single row in query result
(2 rows)
-- send multiple queries, both fail
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['select a from generate_series(1,2) a',
'select a from generate_series(1,2) a']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | expected a single row in query result
localhost | 57637 | f | expected a single row in query result
(2 rows)
-- can create tables at worker
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['create table first_table(a int, b int)',
'create table second_table(a int, b int)']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | CREATE TABLE
localhost | 57637 | t | CREATE TABLE
(2 rows)
-- can insert into table
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from first_table']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 20
(1 row)
-- insert into second table twice
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['insert into second_table select * from first_table']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['insert into second_table select * from first_table']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | INSERT 0 20
(1 row)
-- check inserted values at second table
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from second_table']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 40
(1 row)
-- store worker node name and port again
-- previously set variables become unusable after some number of uses
SELECT quote_literal(node_name) as node_name, node_port as node_port
FROM master_get_active_worker_nodes()
ORDER BY node_port
LIMIT 1 \gset
-- create index on tables
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['create index first_table_index on first_table(a)']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | CREATE INDEX
(1 row)
-- drop created tables
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['drop table first_table']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | DROP TABLE
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['drop table second_table']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | DROP TABLE
(1 row)
-- verify table is dropped
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from second_table']::text[],
false);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: relation "second_table" does not exist
(1 row)
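-- Note: failures are reported per command instead of aborting the caller:
-- the worker's error text lands in the result column alongside success = f,
-- so failed commands can be filtered. Illustrative sketch, not executed by
-- this test:
--   SELECT result FROM master_run_on_worker(ARRAY['localhost']::text[],
--                                           ARRAY[57637]::int[],
--                                           ARRAY['select 1/0']::text[], false)
--   WHERE NOT success;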
--
-- Run the same tests in parallel
--
-- connect to the first worker and ask for shard count, should return 0
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from pg_dist_shard']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 0
(1 row)
-- connect to the first worker and ask for shards, should fail with
-- expecting a single column error
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select * from pg_dist_shard']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | expected a single column in query target
(1 row)
-- query result may only contain a single row
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select a from generate_series(1,2) a']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | expected a single row in query result
(1 row)
-- send multiple queries
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['select a from generate_series(1,1) a',
'select a from generate_series(2,2) a']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 1
localhost | 57637 | t | 2
(2 rows)
-- send multiple queries, one fails
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['select a from generate_series(1,1) a',
'select a from generate_series(1,2) a']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 1
localhost | 57637 | f | expected a single row in query result
(2 rows)
-- send multiple queries, both fail
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['select a from generate_series(1,2) a',
'select a from generate_series(1,2) a']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | expected a single row in query result
localhost | 57637 | f | expected a single row in query result
(2 rows)
-- can create tables at worker
SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[],
ARRAY[:node_port, :node_port]::int[],
ARRAY['create table first_table(a int, b int)',
'create table second_table(a int, b int)']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | CREATE TABLE
localhost | 57637 | t | CREATE TABLE
(2 rows)
-- store worker node name and port again
-- previously set variables become unusable after some number of uses
SELECT quote_literal(node_name) as node_name, node_port as node_port
FROM master_get_active_worker_nodes()
ORDER BY node_port
LIMIT 1 \gset
-- can insert into table
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from first_table']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 20
(1 row)
-- insert into second table twice
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['insert into second_table select * from first_table']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | INSERT 0 20
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['insert into second_table select * from first_table']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | INSERT 0 20
(1 row)
-- check inserted values at second table
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from second_table']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 40
(1 row)
-- create index on tables
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['create index first_table_index on first_table(a)']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | CREATE INDEX
(1 row)
-- drop created tables
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['drop table first_table']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | DROP TABLE
(1 row)
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['drop table second_table']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | t | DROP TABLE
(1 row)
-- verify table is dropped
SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[],
ARRAY['select count(*) from second_table']::text[],
true);
node_name | node_port | success | result
---------------------------------------------------------------------
localhost | 57637 | f | ERROR: relation "second_table" does not exist
(1 row)
-- run_command_on_XXX tests
SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 1
localhost | 57638 | t | 1
(2 rows)
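-- Note: run_command_on_workers(command text, parallel bool) runs one command
-- on every active primary worker; per the citus_tools definition, parallel
-- appears to default to true. Illustrative sketch, not executed by this test:
--   SELECT * FROM run_command_on_workers('select version()') ORDER BY 2;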
SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC;
nodename | nodeport | success | result
---------------------------------------------------------------------
localhost | 57637 | t | 0
localhost | 57638 | t | 0
(2 rows)
-- make sure run_command_on_placements respects shardstate
SET citus.shard_count TO 5;
CREATE TABLE check_placements (key int);
SELECT create_distributed_table('check_placements', 'key', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM run_command_on_placements('check_placements', 'select 1');
nodename | nodeport | shardid | success | result
---------------------------------------------------------------------
localhost | 57637 | 1240000 | t | 1
localhost | 57638 | 1240000 | t | 1
localhost | 57637 | 1240001 | t | 1
localhost | 57638 | 1240001 | t | 1
localhost | 57637 | 1240002 | t | 1
localhost | 57638 | 1240002 | t | 1
localhost | 57637 | 1240003 | t | 1
localhost | 57638 | 1240003 | t | 1
localhost | 57637 | 1240004 | t | 1
localhost | 57638 | 1240004 | t | 1
(10 rows)
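-- Note: per the citus_tools definition, any %s in the command passed to
-- run_command_on_placements is replaced with the placement's shard name, so
-- per-shard queries can be issued directly. Illustrative sketch, not executed
-- by this test:
--   SELECT * FROM run_command_on_placements('check_placements',
--                                           'select count(*) from %s');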
UPDATE pg_dist_shard_placement SET shardstate = 3
WHERE shardid % 2 = 0 AND nodeport = :worker_1_port;
SELECT * FROM run_command_on_placements('check_placements', 'select 1');
nodename | nodeport | shardid | success | result
---------------------------------------------------------------------
localhost | 57638 | 1240000 | t | 1
localhost | 57637 | 1240001 | t | 1
localhost | 57638 | 1240001 | t | 1
localhost | 57638 | 1240002 | t | 1
localhost | 57637 | 1240003 | t | 1
localhost | 57638 | 1240003 | t | 1
localhost | 57638 | 1240004 | t | 1
(7 rows)
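-- Note: shardstate 3 marks a placement invalid, and run_command_on_placements
-- only targets healthy (shardstate 1) placements, which is why the even
-- shardids on worker 1 dropped out above. Placement health can be inspected
-- directly; illustrative sketch, not executed by this test:
--   SELECT shardid, nodeport, shardstate FROM pg_dist_shard_placement
--   WHERE shardid BETWEEN 1240000 AND 1240004 ORDER BY shardid, nodeport;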
DROP TABLE check_placements CASCADE;
-- make sure run_command_on_colocated_placements correctly detects colocation
CREATE TABLE check_colocated (key int);
SELECT create_distributed_table('check_colocated', 'key', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET citus.shard_count TO 4;
CREATE TABLE second_table (key int);
SELECT create_distributed_table('second_table', 'key', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table',
'select 1');
ERROR: tables check_colocated and second_table are not co-located
CONTEXT: PL/pgSQL function run_command_on_colocated_placements(regclass,regclass,text,boolean) line XX at RAISE
-- even when the difference is in replication factor, an error is thrown
DROP TABLE second_table;
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 5;
CREATE TABLE second_table (key int);
SELECT create_distributed_table('second_table', 'key', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table',
'select 1');
ERROR: tables check_colocated and second_table are not co-located
CONTEXT: PL/pgSQL function run_command_on_colocated_placements(regclass,regclass,text,boolean) line XX at RAISE
-- when everything matches, the command is run!
DROP TABLE second_table;
SET citus.shard_replication_factor TO 2;
SET citus.shard_count TO 5;
CREATE TABLE second_table (key int);
SELECT create_distributed_table('second_table', 'key', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table',
'select 1');
nodename | nodeport | shardid1 | shardid2 | success | result
---------------------------------------------------------------------
localhost | 57637 | 1240005 | 1240019 | t | 1
localhost | 57638 | 1240005 | 1240019 | t | 1
localhost | 57637 | 1240006 | 1240020 | t | 1
localhost | 57638 | 1240006 | 1240020 | t | 1
localhost | 57637 | 1240007 | 1240021 | t | 1
localhost | 57638 | 1240007 | 1240021 | t | 1
localhost | 57637 | 1240008 | 1240022 | t | 1
localhost | 57638 | 1240008 | 1240022 | t | 1
localhost | 57637 | 1240009 | 1240023 | t | 1
localhost | 57638 | 1240009 | 1240023 | t | 1
(10 rows)
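-- Note: per the citus_tools definition, the command appears to be formatted
-- with both shard names, so %1$s and %2$s expand to the co-located shard
-- pair, which is what makes the colocation check above necessary.
-- Illustrative sketch, not executed by this test:
--   SELECT * FROM run_command_on_colocated_placements('check_colocated',
--     'second_table', 'select count(*) from %1$s join %2$s using (key)');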
DROP TABLE check_colocated CASCADE;
DROP TABLE second_table CASCADE;
-- runs on all shards
SET citus.shard_count TO 5;
CREATE TABLE check_shards (key int);
SELECT create_distributed_table('check_shards', 'key', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM run_command_on_shards('check_shards', 'select 1');
shardid | success | result
---------------------------------------------------------------------
1240024 | t | 1
1240025 | t | 1
1240026 | t | 1
1240027 | t | 1
1240028 | t | 1
(5 rows)
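-- Note: run_command_on_shards runs the command once per shard on a single
-- healthy placement, and per the citus_tools definition a %s in the command
-- is replaced with the shard name. Illustrative sketch, not executed by this
-- test:
--   SELECT * FROM run_command_on_shards('check_shards',
--                                       'select count(*) from %s');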
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0;
SELECT * FROM run_command_on_shards('check_shards', 'select 1');
NOTICE: some shards do not have active placements
CONTEXT: PL/pgSQL function run_command_on_shards(regclass,text,boolean) line XX at RAISE
shardid | success | result
---------------------------------------------------------------------
1240025 | t | 1
1240027 | t | 1
(2 rows)
DROP TABLE check_shards CASCADE;
-- test the connections to worker nodes
SELECT bool_and(success) AS all_nodes_are_successful FROM (
SELECT citus_check_connection_to_node(nodename, nodeport) AS success
FROM pg_dist_node
WHERE isactive = 't' AND noderole='primary'
) subquery;
all_nodes_are_successful
---------------------------------------------------------------------
t
(1 row)
-- verify that the coordinator can connect to itself
SELECT citus_check_connection_to_node('localhost', :master_port);
citus_check_connection_to_node
---------------------------------------------------------------------
t
(1 row)
-- verify that connections fail when a wrong port is used
-- test with invalid port, prevent OS dependent warning from being displayed
SET client_min_messages TO ERROR;
SELECT citus_check_connection_to_node('localhost', nodeport:=1234);
citus_check_connection_to_node
---------------------------------------------------------------------
f
(1 row)
-- verify that the connections are not successful due to timeouts
SET citus.node_connection_timeout TO 10;
SELECT citus_check_connection_to_node('www.citusdata.com');
citus_check_connection_to_node
---------------------------------------------------------------------
f
(1 row)
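-- Note: citus.node_connection_timeout is measured in milliseconds, so the 10
-- above forces an immediate timeout; nodeport appears to default to 5432 when
-- omitted, as in the call above. Illustrative sketch, not executed by this
-- test:
--   SELECT citus_check_connection_to_node('www.citusdata.com',
--                                         nodeport := 5432);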
RESET citus.node_connection_timeout;
SET client_min_messages TO DEBUG;
-- check the connections in a transaction block
BEGIN;
SELECT citus_check_connection_to_node(nodename, nodeport)
FROM pg_dist_node
WHERE isactive = 't' AND noderole='primary';
citus_check_connection_to_node
---------------------------------------------------------------------
t
t
t
(3 rows)
CREATE TABLE distributed(id int, data text);
SELECT create_distributed_table('distributed', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT count(*) FROM distributed;
DEBUG: Router planner cannot handle multi-shard select queries
count
---------------------------------------------------------------------
0
(1 row)
ROLLBACK;
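-- Note: the block above shows the health check runs inside an explicit
-- transaction without poisoning it: the same transaction can still create and
-- query a distributed table afterwards. Illustrative sketch, not executed by
-- this test:
--   BEGIN;
--   SELECT citus_check_connection_to_node('localhost', 57637);
--   CREATE TABLE t (id int);
--   SELECT create_distributed_table('t', 'id');
--   SELECT count(*) FROM t;
--   ROLLBACK;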
-- create some roles for testing purposes
SET client_min_messages TO ERROR;
CREATE ROLE role_without_login WITH NOLOGIN;
SELECT 1 FROM run_command_on_workers($$CREATE ROLE role_without_login WITH NOLOGIN$$);
?column?
---------------------------------------------------------------------
1
1
(2 rows)
CREATE ROLE role_with_login WITH LOGIN;
SELECT 1 FROM run_command_on_workers($$CREATE ROLE role_with_login WITH LOGIN$$);
?column?
---------------------------------------------------------------------
1
1
(2 rows)
SET client_min_messages TO DEBUG;
-- verify that connections can only be created as users with login privileges.
SET ROLE role_without_login;
SELECT citus_check_connection_to_node('localhost', :worker_1_port);
WARNING: connection to the remote node role_without_login@localhost:xxxxx failed with the following error: FATAL: role "role_without_login" is not permitted to log in
citus_check_connection_to_node
---------------------------------------------------------------------
f
(1 row)
SET ROLE role_with_login;
SELECT citus_check_connection_to_node('localhost', :worker_1_port);
citus_check_connection_to_node
---------------------------------------------------------------------
t
(1 row)
RESET role;
DROP ROLE role_with_login, role_without_login;
SELECT 1 FROM run_command_on_workers($$DROP ROLE role_with_login, role_without_login$$);
?column?
---------------------------------------------------------------------
1
1
(2 rows)
-- check connections from a worker node
\c - - - :worker_1_port
SELECT citus_check_connection_to_node('localhost', :master_port);
citus_check_connection_to_node
---------------------------------------------------------------------
t
(1 row)
SELECT citus_check_connection_to_node('localhost', :worker_1_port);
citus_check_connection_to_node
---------------------------------------------------------------------
t
(1 row)
SELECT citus_check_connection_to_node('localhost', :worker_2_port);
citus_check_connection_to_node
---------------------------------------------------------------------
t
(1 row)
\c - - - :master_port
SELECT * FROM citus_check_cluster_node_health() ORDER BY 1,2,3,4;
from_nodename | from_nodeport | to_nodename | to_nodeport | result
---------------------------------------------------------------------
localhost | 57636 | localhost | 57636 | t
localhost | 57636 | localhost | 57637 | t
localhost | 57636 | localhost | 57638 | t
localhost | 57637 | localhost | 57636 | t
localhost | 57637 | localhost | 57637 | t
localhost | 57637 | localhost | 57638 | t
localhost | 57638 | localhost | 57636 | t
localhost | 57638 | localhost | 57637 | t
localhost | 57638 | localhost | 57638 | t
(9 rows)
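-- Note: citus_check_cluster_node_health() checks connectivity from every node
-- to every node, itself included, so n nodes yield n*n rows; with the
-- coordinator in the metadata that is 3*3 = 9 checks here. Illustrative
-- sketch, not executed by this test:
--   SELECT count(*) FROM citus_check_cluster_node_health();  -- n*n rows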
-- test cluster connectivity when we have broken nodes
SET client_min_messages TO ERROR;
SET citus.node_connection_timeout TO 10;
BEGIN;
INSERT INTO pg_dist_node VALUES
(123456789, 123456789, 'localhost', 123456789),
(1234567890, 1234567890, 'www.citusdata.com', 5432);
SELECT * FROM citus_check_cluster_node_health() ORDER BY 5,1,2,3,4;
from_nodename | from_nodeport | to_nodename | to_nodeport | result
---------------------------------------------------------------------
localhost | 57636 | localhost | 123456789 | f
localhost | 57636 | www.citusdata.com | 5432 | f
localhost | 57637 | localhost | 123456789 | f
localhost | 57637 | www.citusdata.com | 5432 | f
localhost | 57638 | localhost | 123456789 | f
localhost | 57638 | www.citusdata.com | 5432 | f
localhost | 57636 | localhost | 57636 | t
localhost | 57636 | localhost | 57637 | t
localhost | 57636 | localhost | 57638 | t
localhost | 57637 | localhost | 57636 | t
localhost | 57637 | localhost | 57637 | t
localhost | 57637 | localhost | 57638 | t
localhost | 57638 | localhost | 57636 | t
localhost | 57638 | localhost | 57637 | t
localhost | 57638 | localhost | 57638 | t
localhost | 123456789 | localhost | 57636 |
localhost | 123456789 | localhost | 57637 |
localhost | 123456789 | localhost | 57638 |
localhost | 123456789 | localhost | 123456789 |
localhost | 123456789 | www.citusdata.com | 5432 |
www.citusdata.com | 5432 | localhost | 57636 |
www.citusdata.com | 5432 | localhost | 57637 |
www.citusdata.com | 5432 | localhost | 57638 |
www.citusdata.com | 5432 | localhost | 123456789 |
www.citusdata.com | 5432 | www.citusdata.com | 5432 |
(25 rows)
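-- Note: in the matrix above, result is f when the target cannot be reached
-- and empty (NULL) when the source node itself is unreachable, apparently
-- because the check cannot even be dispatched from it. Illustrative sketch,
-- not executed by this test:
--   SELECT bool_and(result) FROM citus_check_cluster_node_health()
--   WHERE result IS NOT NULL;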
ROLLBACK;
RESET citus.node_connection_timeout;
RESET client_min_messages;
DROP SCHEMA tools CASCADE;
RESET SEARCH_PATH;
-- set SHOW_CONTEXT back to default
\set SHOW_CONTEXT errors