mirror of https://github.com/citusdata/citus.git

Changes localhost to the correct host of the workers

Replaces every "'localhost', :worker_1_port" with ":'worker_1_host', :worker_1_port" and every "'localhost', :worker_2_port" with ":'worker_2_host', :worker_2_port" in all .sql regression test files and in the multi_copy.source file, so the tests resolve worker hosts through psql variables instead of a hard-coded localhost.

branch: connection-string-tests-9.2-include
parent e0ca43d7aa
commit 5fe2e1c427
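For readers less familiar with the psql variable syntax this commit switches to, here is a minimal, hedged sketch of how the rewritten calls resolve. The host value below is purely illustrative; in the regression suite the variables are injected by the test driver through --variable flags, as the Perl hunks further down show.

-- Minimal sketch, not part of the commit: how :'worker_1_host' interpolation works.
-- 'worker-1.example.com' is a placeholder; the real value comes from the test driver.
\set worker_1_host 'worker-1.example.com'
\set worker_1_port 57637

-- :'worker_1_host' expands to a single-quoted literal and :worker_1_port to a bare
-- value, so this runs as: SELECT master_add_node('worker-1.example.com', 57637);
SELECT master_add_node(:'worker_1_host', :worker_1_port);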
@@ -95,4 +95,6 @@ s/repartitioned_results_[0-9]+/repartitioned_results_xxxxx/g
 s/dbname=regression/dbname=<db>/g
 s/\b576[0-9][0-9]\b/xxxxx/g
 s/\blocalhost\b/<host>/g
+s/:'worker_2_host'/'<host>'/g
+s/:'worker_1_host'/'<host>'/g
 s/\bpostgres\b/<user>/g
@@ -481,7 +481,7 @@ SELECT shardid, nodename, nodeport
 WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

 -- disable the first node
-SELECT master_disable_node('localhost', :worker_1_port);
+SELECT master_disable_node(:'worker_1_host', :worker_1_port);
 -- set replication factor to 1 so that copy will
 -- succeed without replication count error
 SET citus.shard_replication_factor TO 1;
@@ -502,7 +502,7 @@ SELECT shardid, nodename, nodeport
 WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

 -- add the node back
-SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
+SELECT 1 FROM master_activate_node(:'worker_1_host', :worker_1_port);
 RESET citus.shard_replication_factor;
 -- add two new shards and verify they are created at both workers
 COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
@@ -278,6 +278,7 @@ my $mitmPort = 9060;
 my $masterPort = 57636;

 my $workerCount = 2;
+my @workerHosts = ();
 my @workerPorts = ();

 if ( $constr )
@@ -317,21 +318,36 @@ if ( $constr )
     print $out "s/$host/<host>/g\n";
     print $out "s/", substr("$masterPort", 0, length("$masterPort")-2), "[0-9][0-9]/xxxxx/g\n";


+    my $worker1host = `psql "$constr" -t -c "SELECT nodename FROM pg_dist_node ORDER BY nodeid LIMIT 1;"`;
     my $worker1port = `psql "$constr" -t -c "SELECT nodeport FROM pg_dist_node ORDER BY nodeid LIMIT 1;"`;
+    my $worker2host = `psql "$constr" -t -c "SELECT nodename FROM pg_dist_node ORDER BY nodeid OFFSET 1 LIMIT 1;"`;
     my $worker2port = `psql "$constr" -t -c "SELECT nodeport FROM pg_dist_node ORDER BY nodeid OFFSET 1 LIMIT 1;"`;

+    $worker1host =~ s/^\s+|\s+$//g;
     $worker1port =~ s/^\s+|\s+$//g;
+    $worker2host =~ s/^\s+|\s+$//g;
     $worker2port =~ s/^\s+|\s+$//g;

     push(@workerPorts, $worker1port);
     push(@workerPorts, $worker2port);
+    push(@workerHosts, $worker1host);
+    push(@workerHosts, $worker2host);

+    my $worker1hostReplaced = $worker1host;
+    my $worker2hostReplaced = $worker2host;
+
+    $worker1hostReplaced =~ s/\./\\\./g;
+    $worker2hostReplaced =~ s/\./\\\./g;
+
+    print $out "s/\\b$worker1hostReplaced\\b/<host>/g\n";
+    print $out "s/\\b$worker2hostReplaced\\b/<host>/g\n";
 }
 else
 {
     for (my $workerIndex = 1; $workerIndex <= $workerCount; $workerIndex++) {
         my $workerPort = $masterPort + $workerIndex;
         push(@workerPorts, $workerPort);
+        push(@workerHosts, "localhost");
     }
 }
@@ -521,6 +537,11 @@ for my $workeroff (0 .. $#workerPorts)
     my $port = $workerPorts[$workeroff];
     print $fh "--variable=worker_".($workeroff+1)."_port=$port ";
 }
+for my $workeroff (0 .. $#workerHosts)
+{
+    my $host = $workerHosts[$workeroff];
+    print $fh "--variable=worker_".($workeroff+1)."_host=\"$host\" ";
+}
 for my $workeroff (0 .. $#followerWorkerPorts)
 {
     my $port = $followerWorkerPorts[$workeroff];
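As a brief aside (not part of the commit), a hedged sketch of how a test session could sanity-check that the driver-supplied variables match the cluster metadata the Perl code above reads from pg_dist_node:

-- Illustrative sanity check only: compare the injected variables against pg_dist_node.
SELECT nodename = :'worker_1_host' AND nodeport = :worker_1_port AS worker_1_vars_match
FROM pg_dist_node
ORDER BY nodeid
LIMIT 1;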
@@ -69,11 +69,11 @@ SELECT run_command_on_workers($$SELECT rolcreaterole FROM pg_authid WHERE rolnam
 ALTER ROLE alter_role_1 WITH SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 66 VALID UNTIL '2032-05-05' PASSWORD 'test3';
 SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
-SELECT master_remove_node('localhost', :worker_1_port);
+SELECT master_remove_node(:'worker_1_host', :worker_1_port);
 ALTER ROLE alter_role_1 WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 0 VALID UNTIL '2052-05-05' PASSWORD 'test4';
 SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);
-SELECT 1 FROM master_add_node('localhost', :worker_1_port);
+SELECT 1 FROM master_add_node(:'worker_1_host', :worker_1_port);
 SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1';
 SELECT run_command_on_workers($$SELECT row(rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, EXTRACT (year FROM rolvaliduntil)) FROM pg_authid WHERE rolname = 'alter_role_1'$$);

@@ -1,6 +1,6 @@
 --
 -- Setup MX data syncing
 --
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
+SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
+SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);

@@ -27,7 +27,7 @@ RESET client_min_messages;
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);

 -- show that extension recreation on new nodes works also fine with extension names that require escaping
-SELECT 1 from master_remove_node('localhost', :worker_2_port);
+SELECT 1 from master_remove_node(:'worker_2_host', :worker_2_port);

 -- this output will help us to understand why we have alternative outputs for this test
 -- print true if uuid-ossp is available false otherwise
@@ -41,7 +41,7 @@ WHERE name = 'uuid-ossp'
 :uuid_present_command;

 -- and add the other node
-SELECT 1 from master_add_node('localhost', :worker_2_port);
+SELECT 1 from master_add_node(:'worker_2_host', :worker_2_port);

 -- show that the extension exists on both nodes
 SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname = 'uuid-ossp'$$);
@@ -155,16 +155,16 @@ WHERE s.logicalrelid = 'user_table'::regclass
 ORDER BY placementid;

 -- fail master_add_node by failing copy out operation
-SELECT master_remove_node('localhost', :worker_1_port);
+SELECT master_remove_node(:'worker_1_host', :worker_1_port);
 SELECT citus.mitmproxy('conn.onQuery(query="COPY").kill()');
-SELECT master_add_node('localhost', :worker_1_port);
+SELECT master_add_node(:'worker_1_host', :worker_1_port);

 -- verify node is not added
 SELECT * FROM master_get_active_worker_nodes()
 ORDER BY 1, 2;

 SELECT citus.mitmproxy('conn.allow()');
-SELECT master_add_node('localhost', :worker_1_port);
+SELECT master_add_node(:'worker_1_host', :worker_1_port);

 -- verify node is added
 SELECT * FROM master_get_active_worker_nodes()
@@ -42,7 +42,7 @@ CREATE TABLE distributed_result_info AS
 FROM partition_task_list_results('test', $$ SELECT * FROM source_table $$, 'target_table')
 NATURAL JOIN pg_dist_node;
 SELECT * FROM distributed_result_info ORDER BY resultId;
-SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_2_port) > 0 AS fetched;
+SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], :'worker_2_host', :worker_2_port) > 0 AS fetched;
 SELECT count(*), sum(x) FROM
 read_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[],'binary') AS res (x int);
 ROLLBACk;
@@ -57,10 +57,10 @@ CREATE TABLE distributed_result_info AS
 SELECT * FROM distributed_result_info ORDER BY resultId;
 -- fetch from worker 2 should fail
 SAVEPOINT s1;
-SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_2_port) > 0 AS fetched;
+SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], :'worker_2_host', :worker_2_port) > 0 AS fetched;
 ROLLBACK TO SAVEPOINT s1;
 -- fetch from worker 1 should succeed
-SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], 'localhost', :worker_1_port) > 0 AS fetched;
+SELECT fetch_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[], :'worker_1_host', :worker_1_port) > 0 AS fetched;
 -- make sure the results read are same as the previous transaction block
 SELECT count(*), sum(x) FROM
 read_intermediate_results('{test_from_100802_to_1,test_from_100802_to_2}'::text[],'binary') AS res (x int);
@@ -1,5 +1,5 @@
 SELECT citus.mitmproxy('conn.allow()');

 -- add the workers
-SELECT master_add_node('localhost', :worker_1_port);
+SELECT master_add_node(:'worker_1_host', :worker_1_port);
 SELECT master_add_node('localhost', :worker_2_proxy_port); -- an mitmproxy which forwards to the second worker
@@ -94,7 +94,7 @@ SELECT run_command_on_coordinator_and_workers('DROP SCHEMA non_dist_schema');
 -- test if the grantors are propagated correctly
 -- first remove one of the worker nodes
 SET citus.shard_replication_factor TO 1;
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node(:'worker_2_host', :worker_2_port);

 -- create a new schema
 CREATE SCHEMA grantor_schema;
@@ -122,7 +122,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER
 \c - - - :master_port

 -- add the previously removed node
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

 -- check if the grantors are propagated correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
@@ -178,7 +178,7 @@ SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE')
 -- test grants on public schema
 -- first remove one of the worker nodes
 SET citus.shard_replication_factor TO 1;
-SELECT master_remove_node('localhost', :worker_2_port);
+SELECT master_remove_node(:'worker_2_host', :worker_2_port);

 -- distribute the public schema (it has to be distributed by now but just in case)
 CREATE TABLE public_schema_table (id INT);
@@ -197,7 +197,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspna
 \c - - - :master_port

 -- add the previously removed node
-SELECT 1 FROM master_add_node('localhost', :worker_2_port);
+SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

 -- check if the grants are propagated correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
@ -231,39 +231,39 @@ END;
|
|||
-- straightforward, single-result case
|
||||
BEGIN;
|
||||
SELECT broadcast_intermediate_result('squares_1', 'SELECT s, s*s FROM generate_series(1, 5) s');
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_2_port);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], :'worker_2_host', :worker_2_port);
|
||||
SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], 'localhost', :worker_1_port);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1']::text[], :'worker_1_host', :worker_1_port);
|
||||
SELECT * FROM read_intermediate_result('squares_1', 'binary') AS res (x int, x2 int);
|
||||
END;
|
||||
|
||||
-- multiple results, and some error cases
|
||||
BEGIN;
|
||||
SELECT store_intermediate_result_on_node('localhost', :worker_1_port,
|
||||
SELECT store_intermediate_result_on_node(:'worker_1_host', :worker_1_port,
|
||||
'squares_1', 'SELECT s, s*s FROM generate_series(1, 2) s');
|
||||
SELECT store_intermediate_result_on_node('localhost', :worker_1_port,
|
||||
SELECT store_intermediate_result_on_node(:'worker_1_host', :worker_1_port,
|
||||
'squares_2', 'SELECT s, s*s FROM generate_series(3, 4) s');
|
||||
SAVEPOINT s1;
|
||||
-- results aren't available on coordinator yet
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- fetch from worker 2 should fail
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_2_port);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], :'worker_2_host', :worker_2_port);
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- still, results aren't available on coordinator yet
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- fetch from worker 1 should succeed
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], :'worker_1_host', :worker_1_port);
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
|
||||
-- fetching again should succeed
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], :'worker_1_host', :worker_1_port);
|
||||
SELECT * FROM read_intermediate_results(ARRAY['squares_1', 'squares_2']::text[], 'binary') AS res (x int, x2 int);
|
||||
ROLLBACK TO SAVEPOINT s1;
|
||||
-- empty result id list should succeed
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], 'localhost', :worker_1_port);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY[]::text[], :'worker_1_host', :worker_1_port);
|
||||
-- null in result id list should error gracefully
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY[NULL, 'squares_1', 'squares_2']::text[], 'localhost', :worker_1_port);
|
||||
SELECT * FROM fetch_intermediate_results(ARRAY[NULL, 'squares_1', 'squares_2']::text[], :'worker_1_host', :worker_1_port);
|
||||
END;
|
||||
|
||||
-- results should have been deleted after transaction commit
|
||||
|
|
|
@ -9,14 +9,14 @@ CREATE EXTENSION citus VERSION '7.0-2';
|
|||
|
||||
INSERT INTO pg_dist_shard_placement
|
||||
(placementid, shardid, shardstate, shardlength, nodename, nodeport) VALUES
|
||||
(1, 1, 1, 0, 'localhost', :worker_1_port);
|
||||
(1, 1, 1, 0, :'worker_1_host', :worker_1_port);
|
||||
|
||||
-- if there are no worker nodes which match the shards this should fail
|
||||
ALTER EXTENSION citus UPDATE TO '7.0-3';
|
||||
|
||||
-- if you add a matching worker the upgrade should succeed
|
||||
INSERT INTO pg_dist_node (nodename, nodeport, groupid)
|
||||
VALUES ('localhost', :worker_1_port, 1);
|
||||
VALUES (:'worker_1_host', :worker_1_port, 1);
|
||||
ALTER EXTENSION citus UPDATE TO '7.0-3';
|
||||
|
||||
SELECT * FROM pg_dist_placement;
|
||||
|
|
|
@ -9,34 +9,34 @@ CREATE TABLE test_reference_table (y int primary key, name text);
|
|||
SELECT create_reference_table('test_reference_table');
|
||||
|
||||
-- add the nodes to the cluster
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- get the active nodes
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- try to add a node that is already in the cluster
|
||||
SELECT * FROM master_add_node('localhost', :worker_1_port);
|
||||
SELECT * FROM master_add_node(:'worker_1_host', :worker_1_port);
|
||||
|
||||
-- get the active nodes
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- try to remove a node (with no placements)
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- verify that the node has been deleted
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- try to disable a node with no placements see that node is removed
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT master_disable_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_disable_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- add some shard placements to the cluster
|
||||
SET citus.shard_count TO 16;
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
SELECT * FROM master_activate_node('localhost', :worker_2_port);
|
||||
SELECT * FROM master_activate_node(:'worker_2_host', :worker_2_port);
|
||||
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
|
||||
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
|
||||
|
||||
|
@ -44,7 +44,7 @@ SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
|
|||
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
|
||||
|
||||
-- try to remove a node with active placements and see that node removal is failed
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- insert a row so that master_disable_node() exercises closing connections
|
||||
|
@ -52,7 +52,7 @@ INSERT INTO test_reference_table VALUES (1, '1');
|
|||
|
||||
-- try to disable a node with active placements see that node is removed
|
||||
-- observe that a notification is displayed
|
||||
SELECT master_disable_node('localhost', :worker_2_port);
|
||||
SELECT master_disable_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- try to disable a node which does not exist and see that an error is thrown
|
||||
|
@ -73,24 +73,24 @@ DELETE FROM citus.pg_dist_object WHERE objid = 'public'::regnamespace::oid;
|
|||
|
||||
-- try to manipulate node metadata via non-super user
|
||||
SET ROLE non_super_user;
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
|
||||
SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
|
||||
SELECT 1 FROM master_add_inactive_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_disable_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_remove_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_secondary_node(:'worker_2_host', :worker_2_port + 2, :'worker_2_host', :worker_2_port);
|
||||
SELECT master_update_node(nodeid, :'worker_2_host', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
|
||||
|
||||
-- try to manipulate node metadata via privileged user
|
||||
SET ROLE node_metadata_user;
|
||||
BEGIN;
|
||||
SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_activate_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_disable_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_remove_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_secondary_node('localhost', :worker_2_port + 2, 'localhost', :worker_2_port);
|
||||
SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
|
||||
SELECT 1 FROM master_add_inactive_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_disable_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_remove_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port + 1);
|
||||
SELECT 1 FROM master_add_secondary_node(:'worker_2_host', :worker_2_port + 2, :'worker_2_host', :worker_2_port);
|
||||
SELECT master_update_node(nodeid, :'worker_2_host', :worker_2_port + 3) FROM pg_dist_node WHERE nodeport = :worker_2_port;
|
||||
SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport;
|
||||
ABORT;
|
||||
|
||||
|
@ -99,10 +99,10 @@ SET citus.next_shard_id TO 1220016;
|
|||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- restore the node for next tests
|
||||
SELECT * FROM master_activate_node('localhost', :worker_2_port);
|
||||
SELECT * FROM master_activate_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- try to remove a node with active placements and see that node removal is failed
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- mark all placements in the candidate node as inactive
|
||||
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
|
||||
|
@ -110,7 +110,7 @@ UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group;
|
|||
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
|
||||
|
||||
-- try to remove a node with only inactive placements and see that removal still fails
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- mark all placements in the candidate node as to be deleted
|
||||
|
@ -123,11 +123,11 @@ SELECT create_distributed_table('cluster_management_test_colocated', 'col_1', 'h
|
|||
SELECT logicalrelid, shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard;
|
||||
|
||||
-- try to remove a node with only to be deleted placements and see that removal still fails
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_get_active_worker_nodes();
|
||||
|
||||
-- clean-up
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=:worker_2_group;
|
||||
SET client_min_messages TO ERROR;
|
||||
DROP TABLE cluster_management_test_colocated;
|
||||
|
@ -142,63 +142,63 @@ DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port;
|
|||
SELECT * FROM cluster_management_test;
|
||||
|
||||
-- clean-up
|
||||
SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset
|
||||
SELECT master_add_node(:'worker_2_host', :worker_2_port) AS new_node \gset
|
||||
SELECT groupid AS new_group FROM pg_dist_node WHERE nodeid = :new_node \gset
|
||||
UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group;
|
||||
|
||||
-- test that you are allowed to remove secondary nodes even if there are placements
|
||||
SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary');
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_remove_node('localhost', 9990);
|
||||
|
||||
-- clean-up
|
||||
DROP TABLE cluster_management_test;
|
||||
|
||||
-- check that adding/removing nodes are propagated to nodes with metadata
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
\c - - - :worker_1_port
|
||||
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
|
||||
\c - - - :master_port
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
\c - - - :worker_1_port
|
||||
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
|
||||
\c - - - :master_port
|
||||
|
||||
-- check that added nodes are not propagated to nodes without metadata
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
\c - - - :worker_1_port
|
||||
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
|
||||
\c - - - :master_port
|
||||
|
||||
-- check that removing two nodes in the same transaction works
|
||||
SELECT
|
||||
master_remove_node('localhost', :worker_1_port),
|
||||
master_remove_node('localhost', :worker_2_port);
|
||||
master_remove_node(:'worker_1_host', :worker_1_port),
|
||||
master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT count(1) FROM pg_dist_node;
|
||||
|
||||
-- check that adding two nodes in the same transaction works
|
||||
SELECT
|
||||
master_add_node('localhost', :worker_1_port),
|
||||
master_add_node('localhost', :worker_2_port);
|
||||
master_add_node(:'worker_1_host', :worker_1_port),
|
||||
master_add_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT * FROM pg_dist_node ORDER BY nodeid;
|
||||
|
||||
-- check that mixed add/remove node commands work fine inside transaction
|
||||
BEGIN;
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
COMMIT;
|
||||
|
||||
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
BEGIN;
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
COMMIT;
|
||||
|
||||
SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
|
||||
|
@ -208,15 +208,15 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep
|
|||
\c - - - :master_port
|
||||
|
||||
SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- check that a distributed table can be created after adding a node in a transaction
|
||||
SET citus.shard_count TO 4;
|
||||
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
BEGIN;
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
CREATE TABLE temp(col1 text, col2 int);
|
||||
SELECT create_distributed_table('temp', 'col1');
|
||||
INSERT INTO temp VALUES ('row1', 1);
|
||||
|
@ -242,8 +242,8 @@ DELETE FROM pg_dist_shard;
|
|||
DELETE FROM pg_dist_placement;
|
||||
DELETE FROM pg_dist_node;
|
||||
\c - - - :master_port
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- check that you can't add a primary to a non-default cluster
|
||||
SELECT master_add_node('localhost', 9999, nodecluster => 'olap');
|
||||
|
@ -295,17 +295,17 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887;
|
|||
-- them in any of the remaining tests
|
||||
|
||||
-- master_add_secondary_node lets you skip looking up the groupid
|
||||
SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port);
|
||||
SELECT master_add_secondary_node('localhost', 9995, :'worker_1_host', :worker_1_port);
|
||||
SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port);
|
||||
SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
|
||||
SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
|
||||
SELECT master_add_secondary_node('localhost', 9992, :'worker_1_host', :worker_1_port, nodecluster => 'second-cluster');
|
||||
|
||||
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
|
||||
|
||||
-- master_update_node checks node exists
|
||||
SELECT master_update_node(100, 'localhost', 8000);
|
||||
-- master_update_node disallows aliasing existing node
|
||||
SELECT master_update_node(:worker_1_node, 'localhost', :worker_2_port);
|
||||
SELECT master_update_node(:worker_1_node, :'worker_2_host', :worker_2_port);
|
||||
|
||||
-- master_update_node moves a node
|
||||
SELECT master_update_node(:worker_1_node, 'somehost', 9000);
|
||||
|
@ -313,7 +313,7 @@ SELECT master_update_node(:worker_1_node, 'somehost', 9000);
|
|||
SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
|
||||
|
||||
-- cleanup
|
||||
SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port);
|
||||
SELECT master_update_node(:worker_1_node, :'worker_1_host', :worker_1_port);
|
||||
SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
|
||||
|
||||
|
||||
|
@ -323,7 +323,7 @@ CREATE TABLE test_dist (x int, y int);
|
|||
SELECT create_distributed_table('test_dist', 'x');
|
||||
|
||||
-- testing behaviour when setting shouldhaveshards to false on partially empty node
|
||||
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
|
||||
SELECT * from master_set_node_property(:'worker_2_host', :worker_2_port, 'shouldhaveshards', false);
|
||||
CREATE TABLE test_dist_colocated (x int, y int);
|
||||
CREATE TABLE test_dist_non_colocated (x int, y int);
|
||||
CREATE TABLE test_dist_colocated_with_non_colocated (x int, y int);
|
||||
|
@ -358,7 +358,7 @@ WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
|
|||
DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated;
|
||||
|
||||
-- testing behaviour when setting shouldhaveshards to false on fully empty node
|
||||
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false);
|
||||
SELECT * from master_set_node_property(:'worker_2_host', :worker_2_port, 'shouldhaveshards', false);
|
||||
CREATE TABLE test_dist (x int, y int);
|
||||
CREATE TABLE test_dist_colocated (x int, y int);
|
||||
CREATE TABLE test_dist_non_colocated (x int, y int);
|
||||
|
@ -376,7 +376,7 @@ SELECT nodeport, count(*)
|
|||
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
|
||||
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
|
||||
|
||||
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
|
||||
SELECT * from master_set_node_property(:'worker_2_host', :worker_2_port, 'shouldhaveshards', true);
|
||||
|
||||
-- distributed tables should still not be placed on nodes that were switched to
|
||||
-- shouldhaveshards true
|
||||
|
@ -405,6 +405,6 @@ SELECT nodeport, count(*)
|
|||
FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
|
||||
WHERE logicalrelid = 'test_dist_non_colocated'::regclass GROUP BY nodeport ORDER BY nodeport;
|
||||
|
||||
SELECT * from master_set_node_property('localhost', :worker_2_port, 'bogusproperty', false);
|
||||
SELECT * from master_set_node_property(:'worker_2_host', :worker_2_port, 'bogusproperty', false);
|
||||
|
||||
DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated;
|
||||
|
|
|
@ -23,7 +23,7 @@ WHERE
|
|||
ORDER BY s.shardid, sp.nodeport;
|
||||
|
||||
-- repair colocated shards
|
||||
SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
|
||||
SELECT master_copy_shard_placement(1300000, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
|
||||
|
||||
-- status after shard repair
|
||||
SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
|
||||
|
@ -48,7 +48,7 @@ WHERE
|
|||
ORDER BY s.shardid, sp.nodeport;
|
||||
|
||||
-- repair NOT colocated shard
|
||||
SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
|
||||
SELECT master_copy_shard_placement(1300016, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
|
||||
|
||||
-- status after shard repair
|
||||
SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
|
||||
|
@ -73,7 +73,7 @@ WHERE
|
|||
ORDER BY s.shardid, sp.nodeport;
|
||||
|
||||
-- repair shard in append distributed table
|
||||
SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
|
||||
SELECT master_copy_shard_placement(1300020, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
|
||||
|
||||
-- status after shard repair
|
||||
SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
|
||||
|
@ -101,7 +101,7 @@ WHERE
|
|||
ORDER BY s.shardid, sp.nodeport;
|
||||
|
||||
-- repair while all placements of one shard in colocation group is unhealthy
|
||||
SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
|
||||
SELECT master_copy_shard_placement(1300000, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
|
||||
|
||||
-- status after shard repair
|
||||
SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate
|
||||
|
|
|
@ -21,8 +21,8 @@ RESET client_min_messages;
|
|||
CREATE EXTENSION citus;
|
||||
|
||||
-- re-add the nodes to the cluster
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- verify that a table can be created after the extension has been dropped and recreated
|
||||
CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
|
||||
|
|
|
@ -306,7 +306,7 @@ CREATE DATABASE another;
|
|||
|
||||
\c another
|
||||
CREATE EXTENSION citus;
|
||||
SELECT FROM master_add_node('localhost', :worker_1_port);
|
||||
SELECT FROM master_add_node(:'worker_1_host', :worker_1_port);
|
||||
|
||||
\c - - - :worker_1_port
|
||||
CREATE EXTENSION citus;
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
|
||||
-- do some setup
|
||||
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
CREATE TABLE the_table (a int, b int);
|
||||
SELECT create_distributed_table('the_table', 'a');
|
||||
|
|
|
@ -76,10 +76,10 @@ SELECT stop_metadata_sync_to_node('localhost', 8888);
|
|||
SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
|
||||
|
||||
-- Add a node to another cluster to make sure it's also synced
|
||||
SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
|
||||
SELECT master_add_secondary_node('localhost', 8889, :'worker_1_host', :worker_1_port, nodecluster => 'second-cluster');
|
||||
|
||||
-- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
|
||||
|
||||
-- Check that the metadata has been copied to the worker
|
||||
|
@ -118,7 +118,7 @@ CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
|
|||
SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1');
|
||||
SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
|
||||
-- Check that foreign key metadata exists on the worker
|
||||
\c - - - :worker_1_port
|
||||
|
@ -133,8 +133,8 @@ RESET citus.replication_model;
|
|||
|
||||
-- Check that repeated calls to start_metadata_sync_to_node has no side effects
|
||||
\c - - - :master_port
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
\c - - - :worker_1_port
|
||||
SELECT * FROM pg_dist_local_group;
|
||||
SELECT * FROM pg_dist_node ORDER BY nodeid;
|
||||
|
@ -151,7 +151,7 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table':
|
|||
-- Make sure that start_metadata_sync_to_node cannot be called inside a transaction
|
||||
\c - - - :master_port
|
||||
BEGIN;
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
ROLLBACK;
|
||||
|
||||
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
|
||||
|
@ -160,7 +160,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
|
|||
\c - - - :master_port
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.replication_model TO 'streaming';
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
|
||||
CREATE TABLE mx_query_test (a int, b text, c int);
|
||||
SELECT create_distributed_table('mx_query_test', 'a');
|
||||
|
@ -186,14 +186,14 @@ DROP TABLE mx_query_test;
|
|||
|
||||
-- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false
|
||||
\c - - - :master_port
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port;
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port;
|
||||
|
||||
|
||||
-- Test DDL propagation in MX tables
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SET citus.shard_count = 5;
|
||||
SET citus.multi_shard_commit_protocol TO '2pc';
|
||||
CREATE SCHEMA mx_test_schema_1;
|
||||
|
@ -429,13 +429,13 @@ SET citus.shard_count TO 3;
|
|||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.replication_model TO 'streaming';
|
||||
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- sync table with serial column after create_distributed_table
|
||||
CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
|
||||
SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
DROP TABLE mx_table_with_small_sequence;
|
||||
|
||||
-- Show that create_distributed_table works with a serial column
|
||||
|
@ -469,7 +469,7 @@ SELECT nextval('mx_table_with_sequence_c_seq');
|
|||
|
||||
-- Check that adding a new metadata node sets the sequence space correctly
|
||||
\c - - - :master_port
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
\c - - - :worker_2_port
|
||||
SELECT groupid FROM pg_dist_local_group;
|
||||
|
@ -515,7 +515,7 @@ CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition;
|
|||
DELETE FROM pg_dist_placement;
|
||||
DELETE FROM pg_dist_partition;
|
||||
SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- the master user needs superuser permissions to change the replication model
|
||||
CREATE USER mx_user WITH SUPERUSER;
|
||||
|
@ -532,8 +532,8 @@ SET citus.replication_model TO 'streaming';
|
|||
SELECT create_distributed_table('mx_table', 'a');
|
||||
|
||||
\c - postgres - :master_port
|
||||
SELECT master_add_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT master_add_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
\c - mx_user - :worker_1_port
|
||||
SELECT nextval('mx_table_b_seq');
|
||||
|
@ -569,7 +569,7 @@ UPDATE pg_dist_placement
|
|||
WHERE groupid = :old_worker_2_group;
|
||||
|
||||
\c - - - :master_port
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
DROP USER mx_user;
|
||||
\c - - - :worker_1_port
|
||||
|
@ -637,7 +637,7 @@ CREATE TABLE tmp_placement AS
|
|||
SELECT * FROM pg_dist_placement WHERE groupid = :old_worker_2_group;
|
||||
DELETE FROM pg_dist_placement
|
||||
WHERE groupid = :old_worker_2_group;
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
CREATE TABLE mx_ref (col_1 int, col_2 text);
|
||||
SELECT create_reference_table('mx_ref');
|
||||
|
||||
|
@ -651,7 +651,7 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
|
|||
WHERE logicalrelid='mx_ref'::regclass;
|
||||
|
||||
\c - - - :master_port
|
||||
SELECT master_add_node('localhost', :worker_2_port);
|
||||
SELECT master_add_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
SELECT shardid, nodename, nodeport
|
||||
FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
|
||||
|
@ -727,15 +727,15 @@ SELECT create_reference_table('dist_table_2');
|
|||
ALTER TABLE dist_table_1 ADD COLUMN b int;
|
||||
|
||||
SELECT master_add_node('localhost', :master_port, groupid => 0);
|
||||
SELECT master_disable_node('localhost', :worker_1_port);
|
||||
SELECT master_disable_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node('localhost', :worker_1_port);
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_disable_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT master_disable_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- master_update_node should succeed
|
||||
SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
|
||||
SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444);
|
||||
SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port);
|
||||
SELECT master_update_node(:worker_2_nodeid, :'worker_2_host', :worker_2_port);
|
||||
|
||||
ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT;
|
||||
ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT;
|
||||
|
@ -744,8 +744,8 @@ SELECT pg_reload_conf();
|
|||
UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port;
|
||||
|
||||
-- Cleanup
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
DROP TABLE mx_test_schema_2.mx_table_2 CASCADE;
|
||||
DROP TABLE mx_test_schema_1.mx_table_1 CASCADE;
|
||||
DROP TABLE mx_testing_schema.mx_test_table;
|
||||
|
|
|
@ -333,8 +333,8 @@ SELECT run_command_on_workers($$SELECT proowner::regrole FROM pg_proc WHERE pron
|
|||
|
||||
-- we don't want other tests to have metadata synced
|
||||
-- that might change the test outputs, so we're just trying to be careful
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
RESET ROLE;
|
||||
-- now we distribute the table as super user
|
||||
|
@ -415,15 +415,15 @@ RESET ROLE;
|
|||
|
||||
\c - - - :worker_2_port
|
||||
-- super user should not be able to copy files created by a user
|
||||
SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port);
|
||||
SELECT worker_fetch_partition_file(42, 1, 1, 1, :'worker_1_host', :worker_1_port);
|
||||
|
||||
-- different user should not be able to fetch partition file
|
||||
SET ROLE usage_access;
|
||||
SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port);
|
||||
SELECT worker_fetch_partition_file(42, 1, 1, 1, :'worker_1_host', :worker_1_port);
|
||||
|
||||
-- only the user whom created the files should be able to fetch
|
||||
SET ROLE full_access;
|
||||
SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port);
|
||||
SELECT worker_fetch_partition_file(42, 1, 1, 1, :'worker_1_host', :worker_1_port);
|
||||
RESET ROLE;
|
||||
|
||||
-- now we will test that only the user who owns the fetched file is able to merge it into
|
||||
|
|
|
@ -17,8 +17,8 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
|
|||
-- test that coordinator pg_dist_node entry is synced to the workers
|
||||
SELECT wait_until_metadata_sync();
|
||||
|
||||
SELECT verify_metadata('localhost', :worker_1_port),
|
||||
verify_metadata('localhost', :worker_2_port);
|
||||
SELECT verify_metadata(:'worker_1_host', :worker_1_port),
|
||||
verify_metadata(:'worker_2_host', :worker_2_port);
|
||||
|
||||
CREATE TABLE ref(a int);
|
||||
SELECT create_reference_table('ref');
|
||||
|
@ -82,8 +82,8 @@ SELECT master_remove_node('localhost', :master_port);
|
|||
|
||||
-- test that coordinator pg_dist_node entry was removed from the workers
|
||||
SELECT wait_until_metadata_sync();
|
||||
SELECT verify_metadata('localhost', :worker_1_port),
|
||||
verify_metadata('localhost', :worker_2_port);
|
||||
SELECT verify_metadata(:'worker_1_host', :worker_1_port),
|
||||
verify_metadata(:'worker_2_host', :worker_2_port);
|
||||
|
||||
DROP SCHEMA mx_add_coordinator CASCADE;
|
||||
SET search_path TO DEFAULT;
|
||||
|
|
|
@ -164,12 +164,12 @@ call multi_mx_call.mx_call_proc_raise(2);
|
|||
\set VERBOSITY default
|
||||
|
||||
-- Test that we don't propagate to non-metadata worker nodes
|
||||
select stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
select stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
select stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
select stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
call multi_mx_call.mx_call_proc(2, 0);
|
||||
SET client_min_messages TO NOTICE;
|
||||
select start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
select start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
select start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
select start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make
|
||||
-- worker backend caches inconsistent. Reconnect to coordinator to use
|
||||
|
|
|
@ -4,8 +4,8 @@
|
|||
|
||||
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- create schema to test schema support
|
||||
CREATE SCHEMA citus_mx_test_schema;
|
||||
|
|
|
@ -213,12 +213,12 @@ WITH r AS (
|
|||
) SELECT * FROM test, r, t WHERE t.c=0;
|
||||
|
||||
-- Test that we don't propagate to non-metadata worker nodes
|
||||
select stop_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
select stop_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
select stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
select stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
select mx_call_func(2, 0);
|
||||
SET client_min_messages TO NOTICE;
|
||||
select start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
select start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
select start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
select start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make
|
||||
-- worker backend caches inconsistent. Reconnect to coordinator to use
|
||||
|
|
|
@ -11,15 +11,15 @@ SET citus.shard_count TO 4;
|
|||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.replication_model TO streaming;
|
||||
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- SET citus.log_remote_commands TO on;
|
||||
-- SET client_min_messages TO log;
|
||||
|
||||
-- remove worker 2, so we can add it after we have created some functions that caused
|
||||
-- problems
|
||||
SELECT master_remove_node('localhost', :worker_2_port);
|
||||
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
-- reproduction case as described in #3378
|
||||
CREATE TABLE zoop_table (x int, y int);
|
||||
|
@ -43,7 +43,7 @@ $$;
|
|||
SELECT create_distributed_function('zoop(int)', '$1');
|
||||
|
||||
-- now add the worker back, this triggers function distribution which should not fail.
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
SELECT public.wait_until_metadata_sync();
|
||||
|
||||
|
||||
|
@ -53,4 +53,4 @@ DROP SCHEMA function_table_reference CASCADE;
|
|||
-- make sure the worker is added at the end irregardless of anything failing to not make
|
||||
-- subsequent tests fail as well. All artifacts created during this test should have been
|
||||
-- dropped by the drop cascade above.
|
||||
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
|
||||
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
|
||||
|
|
|
@ -25,8 +25,8 @@ SET citus.shard_count TO 4;
|
|||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
SET citus.replication_model TO 'streaming';
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
CREATE TABLE test_table(id int, time date);
|
||||
SELECT create_distributed_table('test_table', 'id');
|
||||
|
|
|
@ -13,8 +13,8 @@ SET citus.shard_count TO 4;
|
|||
SET citus.shard_replication_factor TO 1;
|
||||
|
||||
SET citus.replication_model TO 'streaming';
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
|
||||
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
|
||||
|
||||
CREATE TABlE ref_table(id int, value_1 int);
|
||||
SELECT create_reference_table('ref_table');
|
||||
|
|
|
@ -25,7 +25,7 @@ CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLE
$$;

-- add a node to the cluster
SELECT master_add_node('localhost', :worker_1_port) As nodeid_1 \gset
SELECT master_add_node(:'worker_1_host', :worker_1_port) As nodeid_1 \gset
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;

-- create couple of tables
@ -37,11 +37,11 @@ SELECT create_distributed_table('dist_table_1', 'a');

-- update the node
SELECT 1 FROM master_update_node((SELECT nodeid FROM pg_dist_node),
'localhost', :worker_2_port);
:'worker_2_host', :worker_2_port);
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;

-- start syncing metadata to the node
SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT 1 FROM start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;

--------------------------------------------------------------------------
@ -53,7 +53,7 @@ SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node
-- if the maintenance daemon does the metadata sync too fast.
BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;
SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port);
SELECT 1 FROM master_update_node(:nodeid_1, :'worker_1_host', :worker_1_port);
SELECT nodeid, nodename, nodeport, hasmetadata, metadatasynced FROM pg_dist_node;
END;

@ -62,7 +62,7 @@ END;
SELECT wait_until_metadata_sync();
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node;

SELECT verify_metadata('localhost', :worker_1_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port);

-- Update the node to a non-existent node. This is to simulate updating to
-- a unwriteable node.
@ -77,7 +77,7 @@ SELECT wait_until_metadata_sync();
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node;

-- update it back to :worker_1_port, now metadata should be synced
SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port);
SELECT 1 FROM master_update_node(:nodeid_1, :'worker_1_host', :worker_1_port);
SELECT wait_until_metadata_sync();
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node;

@ -85,15 +85,15 @@ SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node;
-- Test updating a node when another node is in readonly-mode
--------------------------------------------------------------------------

SELECT master_add_node('localhost', :worker_2_port) AS nodeid_2 \gset
SELECT 1 FROM start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT master_add_node(:'worker_2_host', :worker_2_port) AS nodeid_2 \gset
SELECT 1 FROM start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);

-- Create a table with shards on both nodes
CREATE TABLE dist_table_2(a int);
SELECT create_distributed_table('dist_table_2', 'a');
INSERT INTO dist_table_2 SELECT i FROM generate_series(1, 100) i;

SELECT mark_node_readonly('localhost', :worker_2_port, TRUE);
SELECT mark_node_readonly(:'worker_2_host', :worker_2_port, TRUE);

-- Now updating the other node will mark worker 2 as not synced.
BEGIN;
@ -107,27 +107,27 @@ SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 23456);
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid;

-- Make the node writeable.
SELECT mark_node_readonly('localhost', :worker_2_port, FALSE);
SELECT mark_node_readonly(:'worker_2_host', :worker_2_port, FALSE);
SELECT wait_until_metadata_sync();

-- Mark the node readonly again, so the following master_update_node warns
SELECT mark_node_readonly('localhost', :worker_2_port, TRUE);
SELECT mark_node_readonly(:'worker_2_host', :worker_2_port, TRUE);

-- Revert the nodeport of worker 1.
BEGIN;
SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port);
SELECT 1 FROM master_update_node(:nodeid_1, :'worker_1_host', :worker_1_port);
SELECT count(*) FROM dist_table_2;
END;

SELECT wait_until_metadata_sync();

-- Make the node writeable.
SELECT mark_node_readonly('localhost', :worker_2_port, FALSE);
SELECT mark_node_readonly(:'worker_2_host', :worker_2_port, FALSE);
SELECT wait_until_metadata_sync();

SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port);
SELECT verify_metadata('localhost', :worker_1_port),
verify_metadata('localhost', :worker_2_port);
SELECT 1 FROM master_update_node(:nodeid_1, :'worker_1_host', :worker_1_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port),
verify_metadata(:'worker_2_host', :worker_2_port);

--------------------------------------------------------------------------
-- Test that master_update_node rolls back properly
@ -136,8 +136,8 @@ BEGIN;
SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', 12345);
ROLLBACK;

SELECT verify_metadata('localhost', :worker_1_port),
verify_metadata('localhost', :worker_2_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port),
verify_metadata(:'worker_2_host', :worker_2_port);

--------------------------------------------------------------------------
-- Test that master_update_node can appear in a prepared transaction.
@ -151,15 +151,15 @@ SELECT wait_until_metadata_sync();
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid;

BEGIN;
SELECT 1 FROM master_update_node(:nodeid_1, 'localhost', :worker_1_port);
SELECT 1 FROM master_update_node(:nodeid_1, :'worker_1_host', :worker_1_port);
PREPARE TRANSACTION 'tx01';
COMMIT PREPARED 'tx01';

SELECT wait_until_metadata_sync();
SELECT nodeid, hasmetadata, metadatasynced FROM pg_dist_node ORDER BY nodeid;

SELECT verify_metadata('localhost', :worker_1_port),
verify_metadata('localhost', :worker_2_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port),
verify_metadata(:'worker_2_host', :worker_2_port);

--------------------------------------------------------------------------
-- Test that changes in isactive is propagated to the metadata nodes
@ -167,11 +167,11 @@ SELECT verify_metadata('localhost', :worker_1_port),
-- Don't drop the reference table so it has shards on the nodes being disabled
DROP TABLE dist_table_1, dist_table_2;

SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port);
SELECT 1 FROM master_disable_node(:'worker_2_host', :worker_2_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port);

SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port);
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port);

------------------------------------------------------------------------------------
-- Test master_disable_node() when the node that is being disabled is actually down
@ -189,13 +189,13 @@ SELECT 1 FROM master_disable_node('localhost', 1);
SELECT stop_metadata_sync_to_node('localhost', 1);
SELECT 1 FROM master_disable_node('localhost', 1);

SELECT verify_metadata('localhost', :worker_1_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port);

SELECT master_update_node(:nodeid_2, 'localhost', :worker_2_port);
SELECT master_update_node(:nodeid_2, :'worker_2_host', :worker_2_port);
SELECT wait_until_metadata_sync();

SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT verify_metadata('localhost', :worker_1_port);
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port);

------------------------------------------------------------------------------------
@ -209,19 +209,19 @@ SELECT wait_until_metadata_sync();
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid IN (:nodeid_1, :nodeid_2);

-- should error out
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
SELECT 1 FROM master_disable_node(:'worker_2_host', :worker_2_port);

-- try again after stopping metadata sync
SELECT stop_metadata_sync_to_node('localhost', 1);
SELECT 1 FROM master_disable_node('localhost', :worker_2_port);
SELECT 1 FROM master_disable_node(:'worker_2_host', :worker_2_port);

-- bring up node 1
SELECT master_update_node(:nodeid_1, 'localhost', :worker_1_port);
SELECT master_update_node(:nodeid_1, :'worker_1_host', :worker_1_port);
SELECT wait_until_metadata_sync();

SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port);

SELECT verify_metadata('localhost', :worker_1_port);
SELECT verify_metadata(:'worker_1_host', :worker_1_port);

-- cleanup
DROP TABLE ref_table;
@ -9,7 +9,7 @@ SET citus.shard_replication_factor TO 1;

-- make sure wen can create partitioning tables in MX
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- 1-) Distributing partitioned table
-- create partitioned table
@ -162,9 +162,9 @@ DROP TABLE partitioning_test;
\c - - - :master_port

-- make sure we can repeatedly call start_metadata_sync_to_node
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- make sure we can drop partitions
DROP TABLE partitioning_test_2009;
@ -612,7 +612,7 @@ EXECUTE countsome; -- should indicate replanning
EXECUTE countsome; -- no replanning

-- repair shards, should invalidate via master_metadata_utility.c
SELECT master_copy_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port)
SELECT master_copy_shard_placement(shardid, :'worker_2_host', :worker_2_port, :'worker_1_host', :worker_1_port)
FROM pg_dist_shard_placement
WHERE shardid IN (
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass)
@ -920,7 +920,7 @@ SELECT placementid AS a_placement_id FROM pg_dist_shard_placement WHERE shardid
SELECT placementid AS b_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_2_port \gset

UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE placementid = :a_placement_id;
SELECT master_copy_shard_placement(:a_shard_id, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
SELECT master_copy_shard_placement(:a_shard_id, :'worker_2_host', :worker_2_port, :'worker_1_host', :worker_1_port);
SELECT shardid, shardstate FROM pg_dist_shard_placement WHERE placementid = :a_placement_id;

-- some queries that are captured in functions
@ -14,7 +14,7 @@ CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE
DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port;

-- make worker 1 receive metadata changes
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- remove non-existing node
SELECT master_remove_node('localhost', 55555);
@ -25,13 +25,13 @@ SELECT master_remove_node('localhost', 55555);
-- verify node exist before removal
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- verify node is removed
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

-- re-add the node for next tests
SELECT master_add_node('localhost', :worker_2_port) AS worker_2_nodeid \gset
SELECT master_add_node(:'worker_2_host', :worker_2_port) AS worker_2_nodeid \gset
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeid=:worker_2_nodeid \gset
-- add a secondary to check we don't attempt to replicate the table to it
SELECT 1 FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary');
@ -84,7 +84,7 @@ WHERE

\c - - - :master_port

SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- status after master_remove_node
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
@ -117,17 +117,17 @@ WHERE
\c - - - :master_port

-- remove same node twice
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- try to disable the node before removing it (this used to crash)
SELECT master_disable_node('localhost', :worker_2_port);
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_disable_node(:'worker_2_host', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- re-add the node for the next test
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- remove node in a transaction and ROLLBACK

@ -162,7 +162,7 @@ WHERE
\c - - - :master_port

BEGIN;
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
ROLLBACK;

-- status after master_remove_node
@ -228,7 +228,7 @@ WHERE
\c - - - :master_port

BEGIN;
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
COMMIT;

-- status after master_remove_node
@ -262,7 +262,7 @@ WHERE
\c - - - :master_port

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- test inserting a value then removing a node in a transaction

@ -298,7 +298,7 @@ WHERE

BEGIN;
INSERT INTO remove_node_reference_table VALUES(1);
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
COMMIT;

-- status after master_remove_node
@ -337,7 +337,7 @@ SELECT * FROM remove_node_reference_table;
\c - - - :master_port

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- test executing DDL command then removing a node in a transaction
@ -374,7 +374,7 @@ WHERE

BEGIN;
ALTER TABLE remove_node_reference_table ADD column2 int;
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
COMMIT;

-- status after master_remove_node
@ -413,7 +413,7 @@ SET citus.next_shard_id TO 1380001;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass;

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- test DROP table after removing a node in a transaction
@ -436,7 +436,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

BEGIN;
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);
DROP TABLE remove_node_reference_table;
COMMIT;

@ -453,7 +453,7 @@ WHERE
SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000;

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- re-create remove_node_reference_table
CREATE TABLE remove_node_reference_table(column1 int);
@ -497,7 +497,7 @@ ORDER BY
shardid;
\c - - - :master_port

SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- status after master_remove_node
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
@ -530,7 +530,7 @@ WHERE
\c - - - :master_port

-- re-add the node for next tests
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- test with master_disable_node
@ -568,7 +568,7 @@ ORDER BY shardid ASC;

\c - - - :master_port

SELECT master_disable_node('localhost', :worker_2_port);
SELECT master_disable_node(:'worker_2_host', :worker_2_port);

-- status after master_disable_node
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
@ -601,7 +601,7 @@ WHERE
\c - - - :master_port

-- re-add the node for next tests
SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port);

-- DROP tables to clean workspace
@ -609,7 +609,7 @@ DROP TABLE remove_node_reference_table;
DROP TABLE remove_node_reference_table_schema.table1;
DROP SCHEMA remove_node_reference_table_schema CASCADE;

SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- reload pg_dist_shard_placement table
INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement);
@ -43,37 +43,37 @@ UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid
-- cannot repair a shard after a modification (transaction still open during repair)
BEGIN;
ALTER TABLE customer_engagements ADD COLUMN value float;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
ROLLBACK;

BEGIN;
INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event');
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
ROLLBACK;

-- modifications after reparing a shard are fine (will use new metadata)
BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
ALTER TABLE customer_engagements ADD COLUMN value float;
ROLLBACK;

BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event');
ROLLBACK;

-- deactivate placement
UPDATE pg_dist_placement SET shardstate = 1 WHERE groupid = :worker_2_group and shardid = :newshardid;

SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);

UPDATE pg_dist_placement SET shardstate = 3 WHERE groupid = :worker_2_group and shardid = :newshardid;

-- also try to copy from an inactive placement
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_2_host', :worker_2_port, :'worker_1_host', :worker_1_port);

-- "copy" this shard from the first placement to the second one
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);

-- now, update first placement as unhealthy (and raise a notice) so that queries are not routed to there
UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND groupid = :worker_1_group;
@ -102,4 +102,4 @@ SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remo
UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND groupid = :worker_2_group;

-- oops! we don't support repairing shards backed by foreign tables
SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:remotenewshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
@ -12,7 +12,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000;
-- remove a node for testing purposes
CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port;
DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port;
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- test adding new node with no reference tables
@ -20,7 +20,7 @@ SELECT master_remove_node('localhost', :worker_2_port);
-- verify there is no node with nodeport = :worker_2_port before adding the node
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- verify node is added
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
@ -35,7 +35,7 @@ WHERE

-- test adding new node with a reference table which does not have any healthy placement
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- verify there is no node with nodeport = :worker_2_port before adding the node
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
@ -44,7 +44,7 @@ CREATE TABLE replicate_reference_table_unhealthy(column1 int);
SELECT create_reference_table('replicate_reference_table_unhealthy');
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1370000;

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- verify node is not added
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
@ -80,7 +80,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'replicate_reference_table_valid'::regclass);

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- status after master_add_node
SELECT
@ -117,7 +117,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'replicate_reference_table_valid'::regclass);

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- status after master_add_node
SELECT
@ -139,7 +139,7 @@ DROP TABLE replicate_reference_table_valid;

-- test replicating a reference table when a new node added in TRANSACTION + ROLLBACK
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

CREATE TABLE replicate_reference_table_rollback(column1 int);
SELECT create_reference_table('replicate_reference_table_rollback');
@ -161,7 +161,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass);

BEGIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
ROLLBACK;

-- status after master_add_node
@ -204,7 +204,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'replicate_reference_table_commit'::regclass);

BEGIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
COMMIT;

-- status after master_add_node
@ -227,7 +227,7 @@ DROP TABLE replicate_reference_table_commit;

-- test adding new node + upgrading another hash distributed table to reference table + creating new reference table in TRANSACTION
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

CREATE TABLE replicate_reference_table_reference_one(column1 int);
SELECT create_reference_table('replicate_reference_table_reference_one');
@ -270,7 +270,7 @@ ORDER BY logicalrelid;

BEGIN;
SET LOCAL client_min_messages TO ERROR;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
SELECT upgrade_to_reference_table('replicate_reference_table_hash');
SELECT create_reference_table('replicate_reference_table_reference_two');
COMMIT;
@ -306,14 +306,14 @@ DROP TABLE replicate_reference_table_reference_two;

-- test inserting a value then adding a new node in a transaction
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

CREATE TABLE replicate_reference_table_insert(column1 int);
SELECT create_reference_table('replicate_reference_table_insert');

BEGIN;
INSERT INTO replicate_reference_table_insert VALUES(1);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
ROLLBACK;

DROP TABLE replicate_reference_table_insert;
@ -331,7 +331,7 @@ COPY replicate_reference_table_copy FROM STDIN;
4
5
\.
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
ROLLBACK;

DROP TABLE replicate_reference_table_copy;
@ -343,7 +343,7 @@ SELECT create_reference_table('replicate_reference_table_ddl');

BEGIN;
ALTER TABLE replicate_reference_table_ddl ADD column2 int;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
ROLLBACK;

DROP TABLE replicate_reference_table_ddl;
@ -370,7 +370,7 @@ WHERE colocationid IN
WHERE logicalrelid = 'replicate_reference_table_drop'::regclass);

BEGIN;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
DROP TABLE replicate_reference_table_drop;
COMMIT;

@ -386,7 +386,7 @@ ORDER BY shardid, nodeport;
SELECT * FROM pg_dist_colocation WHERE colocationid = 1370009;

-- test adding a node while there is a reference table at another schema
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

CREATE SCHEMA replicate_reference_table_schema;
CREATE TABLE replicate_reference_table_schema.table1(column1 int);
@ -408,7 +408,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass);

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- status after master_add_node
SELECT
@ -431,7 +431,7 @@ DROP SCHEMA replicate_reference_table_schema CASCADE;

-- test adding a node when there are foreign keys between reference tables
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

CREATE TABLE ref_table_1(id int primary key, v int);
CREATE TABLE ref_table_2(id int primary key, v int references ref_table_1(id));
@ -450,7 +450,7 @@ WHERE
nodeport = :worker_2_port
ORDER BY shardid, nodeport;

SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- status after master_add_node
SELECT
@ -467,12 +467,12 @@ SELECT run_command_on_workers('select count(*) from pg_constraint where contype=
DROP TABLE ref_table_1, ref_table_2, ref_table_3;

-- do some tests with inactive node
SELECT master_remove_node('localhost', :worker_2_port);
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

CREATE TABLE initially_not_replicated_reference_table (key int);
SELECT create_reference_table('initially_not_replicated_reference_table');

SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_inactive_node(:'worker_2_host', :worker_2_port);

-- we should see only one shard placements (other than coordinator)
SELECT
@ -490,7 +490,7 @@ WHERE
ORDER BY 1,4,5;

-- we should see the two shard placements after activation
SELECT 1 FROM master_activate_node('localhost', :worker_2_port);
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port);

SELECT
shardid, shardstate, shardlength, nodename, nodeport
@ -507,7 +507,7 @@ WHERE
ORDER BY 1,4,5;

-- this should have no effect
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- test adding an invalid node while we have reference tables to replicate
-- set client message level to ERROR and verbosity to terse to supporess
@ -493,7 +493,7 @@ SET search_path TO public;

-- mark shard as inactive
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port;
SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
SELECT master_copy_shard_placement(1190000, :'worker_2_host', :worker_2_port, :'worker_1_host', :worker_1_port);

-- verify shardstate
SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport;
@ -504,7 +504,7 @@ SET search_path TO test_schema_support;

-- mark shard as inactive
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port;
SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
SELECT master_copy_shard_placement(1190000, :'worker_2_host', :worker_2_port, :'worker_1_host', :worker_1_port);

-- verify shardstate
SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport;
@ -56,8 +56,8 @@ DROP EXTENSION citus;
CREATE EXTENSION citus;

-- re-add the nodes to the cluster
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT 1 FROM master_add_node(:'worker_1_host', :worker_1_port);
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

-- create a table with a SERIAL column
CREATE TABLE testserialtable(id serial, group_id integer);
@ -312,7 +312,7 @@ SELECT create_distributed_table('transactional_drop_mx', 'column1');
UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='transactional_drop_mx'::regclass;

-- make worker 1 receive metadata changes
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- see metadata is propogated to the worker
\c - - - :worker_1_port
@ -373,7 +373,7 @@ SELECT master_remove_node('localhost', :master_port);

-- clean the workspace
DROP TABLE transactional_drop_shards, transactional_drop_reference;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- test DROP TABLE as a non-superuser in a transaction block
CREATE USER try_drop_table WITH LOGIN;
@ -34,7 +34,7 @@ FROM pg_dist_partition
WHERE logicalrelid IN ('mx_table'::regclass, 'mx_table_2'::regclass)
ORDER BY logicalrelid;

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

COPY mx_table (col_1, col_2) FROM STDIN WITH (FORMAT 'csv');
-37, 'lorem'
@ -138,19 +138,19 @@ SET colocationid = :old_colocation_id
WHERE logicalrelid='mx_table_2'::regclass;

-- start_metadata_sync_to_node
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;

-- stop_metadata_sync_to_node
\c - - - :master_port
SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
\c - - - :worker_1_port

SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);

\c - - - :master_port
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
\c - - - :worker_2_port
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition;
@ -180,7 +180,7 @@ SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_po
INSERT INTO pg_dist_placement (groupid, shardid, shardstate, shardlength)
VALUES (:worker_2_group, :testshardid, 3, 0);

SELECT master_copy_shard_placement(:testshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:testshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);

SELECT shardid, nodename, nodeport, shardstate
FROM pg_dist_shard_placement
@ -212,7 +212,7 @@ ROLLBACK;
\c - - - :master_port
DROP TABLE mx_table;
DROP TABLE mx_table_2;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
\c - - - :worker_1_port
DELETE FROM pg_dist_node;
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition;
@ -626,7 +626,7 @@ UPDATE pg_dist_shard_placement SET shardstate = 3
WHERE nodeport = :worker_2_port AND
shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass);

SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- situation before upgrade_reference_table
SELECT
@ -724,5 +724,5 @@ ORDER BY shardid;

\c - - - :master_port
DROP TABLE upgrade_reference_table_mx;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
RESET client_min_messages;
@ -97,7 +97,7 @@ DROP EXTENSION seg CASCADE;

DROP SCHEMA "extension'test" CASCADE;
RESET client_min_messages;
SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT 1 from master_remove_node(:'worker_2_host', :worker_2_port);

-- then create the extension
CREATE EXTENSION seg;
@ -111,7 +111,7 @@ CREATE TABLE ref_table_2 (x seg);
SELECT create_reference_table('ref_table_2');

-- and add the other node
SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT 1 from master_add_node(:'worker_2_host', :worker_2_port);

-- show that the extension is created on both existing and new node
SELECT run_command_on_workers($$SELECT count(extnamespace) FROM pg_extension WHERE extname = 'seg'$$);
@ -202,7 +202,7 @@ SET search_path TO "extension'test";
RESET client_min_messages;

-- remove the node, we'll add back again
SELECT 1 from master_remove_node('localhost', :worker_2_port);
SELECT 1 from master_remove_node(:'worker_2_host', :worker_2_port);

-- now, create a type that depends on another type, which
-- finally depends on an extension
@ -223,7 +223,7 @@ BEGIN;
COMMIT;

-- add the node back
SELECT 1 from master_add_node('localhost', :worker_2_port);
SELECT 1 from master_add_node(:'worker_2_host', :worker_2_port);

-- make sure that both extensions are created on both nodes
SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn'));
@ -202,18 +202,18 @@ UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid
-- cannot repair a shard after a modification (transaction still open during repair)
BEGIN;
INSERT INTO customer_engagements VALUES (1, 1);
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
ROLLBACK;

-- modifications after reparing a shard are fine (will use new metadata)
BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
ALTER TABLE customer_engagements ADD COLUMN value float DEFAULT 1.0;
SELECT * FROM customer_engagements ORDER BY 1,2,3;
ROLLBACK;

BEGIN;
SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
SELECT master_copy_shard_placement(:newshardid, :'worker_1_host', :worker_1_port, :'worker_2_host', :worker_2_port);
INSERT INTO customer_engagements VALUES (1, 1);
SELECT count(*) FROM customer_engagements;
ROLLBACK;
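Note on the substitution used throughout this diff: :'worker_1_host' and :'worker_2_host' are psql client-side variables, and the :'var' form interpolates a variable's value as a single-quoted SQL literal, while :var interpolates it unquoted (as is done for the port variables). A minimal psql sketch of the mechanism; the values below are illustrative assumptions only, not what the regression harness actually assigns:

-- Assumed values, set here purely for illustration.
\set worker_1_host 'localhost'
\set worker_1_port 57637
-- :'worker_1_host' expands to the quoted literal 'localhost';
-- :worker_1_port expands to the bare value 57637.
SELECT master_add_node(:'worker_1_host', :worker_1_port);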