mirror of https://github.com/citusdata/citus.git

Adds real_hosts

Changes "\c - - - :worker_1_port" into "\c - - :real_worker_1_host :worker_1_port", "\c - - - :worker_2_port" into "\c - - :real_worker_2_host :worker_2_port", and "\c - - - :master_port" into "\c - - :real_master_host :master_port" in all input/ and .sql files.

connection-string-tests-9.2-include
parent 5fe2e1c427
commit cd2a606998
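
A note on the psql meta-command being rewritten: in \c, each of the four positional arguments (dbname, user, host, port) may be written as "-" to reuse the value from the previous connection. The old form therefore kept the current host and only changed the port, which only works when every node runs on localhost; the new form also switches to the host held in a real_*_host psql variable, which defaults to localhost (see the pg_regress_multi.pl hunks below), so the single-machine behavior is preserved. For example:

    \c - - :real_worker_1_host :worker_1_port    -- same db and user, worker 1's host and port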
@@ -116,7 +116,7 @@ check-empty: all
 	-- $(MULTI_REGRESS_OPTS) $(EXTRA_TESTS)
 
 check-multi: all
-	$(pg_regress_multi_check) --constr="$(constr)" --load-extension=citus \
+	$(pg_regress_multi_check) --constr="$(constr)" --hoststr="$(hoststr)" --load-extension=citus \
 	-- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS)
 
 check-multi-non-adaptive: all
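
With the Makefile plumbing above, the multi schedule can be pointed at remote workers from the make command line. A sketch, with placeholder addresses (the worker1host/worker2host keys are the ones parsed in pg_regress_multi.pl further down):

    make check-multi hoststr="worker1host=10.0.0.1 worker2host=10.0.0.2"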
@@ -98,3 +98,6 @@ s/\blocalhost\b/<host>/g
 s/:'worker_2_host'/'<host>'/g
 s/:'worker_1_host'/'<host>'/g
 s/\bpostgres\b/<user>/g
+s/:real_worker_1_host/-/g
+s/:real_worker_2_host/-/g
+s/:real_master_host/-/g
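
These added normalization rules map the new variable references back to "-" before the output diff, so the pre-existing expected files keep working unchanged: an echoed

    \c - - :real_worker_1_host :worker_1_port

normalizes back to

    \c - - - :worker_1_port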
@@ -34,9 +34,9 @@ SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
 -- verify that the storage options made it to the table definitions
 SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Verify that we can add columns

@@ -47,13 +47,13 @@ ALTER TABLE lineitem_alter ADD COLUMN int_column2 INTEGER DEFAULT 2;
 ALTER TABLE lineitem_alter ADD COLUMN null_column INTEGER;
 
 -- show changed schema on one worker
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT attname, atttypid::regtype
 FROM
     (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc
     JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
 ORDER BY attnum;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
 SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;

@@ -234,9 +234,9 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
 DROP INDEX temp_index_2;
 
 -- Add column on only one worker...
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 ALTER TABLE lineitem_alter_220000 ADD COLUMN first integer;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- and try to add it in a multi-statement block, which fails
 BEGIN;

@@ -281,7 +281,7 @@ DROP INDEX replica_idx;
 ALTER TABLE single_shard_items REPLICA IDENTITY default;
 
 -- Drop the column from the worker...
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 ALTER TABLE lineitem_alter_220000 DROP COLUMN first;
 
 -- Create table to trigger at-xact-end (deferred) failure

@@ -296,7 +296,7 @@ $ldt$ LANGUAGE plpgsql;
 
 CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag();
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 -- The above trigger will cause failure at transaction end on one placement.
 -- We'll test 2PC first, as it should handle this "best" (no divergence)
 SET citus.multi_shard_commit_protocol TO '2pc';

@@ -318,12 +318,12 @@ COMMIT;
 -- The block should have committed with a warning
 SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1;
 
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 DROP EVENT TRIGGER log_ddl_tag;
 DROP FUNCTION log_ddl_tag();
 DROP TABLE ddl_commands;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 -- Distributed SELECTs may appear after ALTER
 BEGIN;
 CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);

@@ -357,13 +357,13 @@ SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
 WHERE logicalrelid='test_ab'::regclass AND shardstate=3;
 
 -- Check that the schema on the worker still looks reasonable
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT attname, atttypid::regtype
 FROM
     (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc
     JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
 ORDER BY attnum;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- verify that we can rename distributed tables
 SHOW citus.enable_ddl_propagation;

@@ -373,32 +373,32 @@ ALTER TABLE lineitem_alter RENAME TO lineitem_renamed;
 SELECT relname FROM pg_class WHERE relname = 'lineitem_renamed';
 
 -- show rename worked on one worker, too
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- revert it to original name
 ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
 
 -- show rename worked on one worker, too
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- verify that we can set and reset storage parameters
 ALTER TABLE lineitem_alter SET(fillfactor=40);
 SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 ALTER TABLE lineitem_alter RESET(fillfactor);
 SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- verify that we can rename indexes on distributed tables
 CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber);

@@ -408,9 +408,9 @@ ALTER INDEX temp_index_1 RENAME TO idx_lineitem_linenumber;
 SELECT relname FROM pg_class WHERE relname = 'idx_lineitem_linenumber';
 
 -- show rename worked on one worker, too
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- now get rid of the index
 DROP INDEX idx_lineitem_linenumber;

@@ -430,9 +430,9 @@ ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
 ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int;
 
 -- verify newly added column is not present in a worker shard
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- ddl propagation flag is reset to default, disable it again
 SET citus.enable_ddl_propagation to false;

@@ -459,9 +459,9 @@ CREATE UNIQUE INDEX unique_lineitem_partkey on lineitem_alter(l_partkey);
 SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
 
 -- verify index is not created on worker
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%';
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- verify alter table and drop sequence in the same transaction does not cause deadlock
 SET citus.shard_count TO 4;

@@ -491,7 +491,7 @@ CREATE TABLE trigger_table (
 
 SELECT create_distributed_table('trigger_table', 'id');
 -- first set a trigger on a shard
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 
 CREATE FUNCTION update_value() RETURNS trigger AS $up$
 BEGIN

@@ -504,7 +504,7 @@ CREATE TRIGGER update_value
 BEFORE INSERT ON trigger_table_220017
 FOR EACH ROW EXECUTE PROCEDURE update_value();
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 INSERT INTO trigger_table VALUES (1, 'trigger disabled');
 SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value;
 

@@ -531,9 +531,9 @@ SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
 DROP TABLE lineitem_alter;
 -- check that nothing's left over on workers, other than the leftover shard created
 -- during the unsuccessful COPY
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Test alter table with drop table in the same transaction
 BEGIN;

@@ -544,9 +544,9 @@ DROP TABLE test_table_1;
 END;
 
 -- There should be no test_table_1 shard on workers
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%';
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Test WITH options on a normal simple hash-distributed table
 CREATE TABLE hash_dist(id bigint primary key, f1 text) WITH (fillfactor=40);

@@ -555,24 +555,24 @@ SELECT create_distributed_table('hash_dist','id');
 -- verify that the storage options made it to the table definitions
 SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- verify that we can set and reset index storage parameters
 ALTER INDEX hash_dist_pkey SET(fillfactor=40);
 SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 ALTER INDEX hash_dist_pkey RESET(fillfactor);
 SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- verify error message on ALTER INDEX, SET TABLESPACE is unsupported
 ALTER INDEX hash_dist_pkey SET TABLESPACE foo;

@@ -583,9 +583,9 @@ CREATE UNIQUE INDEX another_index ON hash_dist(id) WITH (fillfactor=50);
 -- show the index and its storage options on coordinator, then workers
 SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- get rid of the index
 DROP INDEX another_index;

@@ -638,9 +638,9 @@ SET citus.shard_count to 4;
 CREATE TABLE numbers_hash(a int, b int);
 SELECT create_distributed_table('numbers_hash', 'a');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 ALTER TABLE numbers_hash_560180 DROP COLUMN b;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- operation will fail to modify a shard and roll back
 COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');

@@ -714,9 +714,9 @@ SELECT * FROM drop_copy_test_table WHERE col3 = 1;
 DROP TABLE drop_copy_test_table;
 
 -- There should be no "tt1" shard on the worker nodes
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'tt1%';
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- copy >8MB to a single worker to trigger a flush in PutRemoteCopyData
 BEGIN;

@@ -11,13 +11,13 @@ SET search_path TO citus_mx_test_schema;
 \COPY citus_mx_test_schema_join_2.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|';
 
 -- now try loading data from worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO public;
 
 \COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
 \COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
 
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 -- and use second worker as well
 \COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
 \COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

@@ -26,7 +26,7 @@ SET search_path TO public;
 -- worker nodes, yet in order to remove broadcast logic related codes we change
 -- the table to reference table and copy data from master. Should be updated
 -- when worker nodes gain capability to run dml commands on reference tables.
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO public;
 
 \COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
@@ -81,6 +81,7 @@ my $connectionTimeout = 5000;
 my $useMitmproxy = 0;
 my $mitmFifoPath = catfile($TMP_CHECKDIR, "mitmproxy.fifo");
 my $constr = "";
+my $hoststr = "";
 
 my $serversAreShutdown = "TRUE";
 my $usingWindows = 0;

@@ -110,6 +111,7 @@ GetOptions(
 	'connection-timeout=s' => \$connectionTimeout,
 	'mitmproxy' => \$useMitmproxy,
 	'constr=s' => \$constr,
+	'hoststr=s' => \$hoststr,
 	'help' => sub { Usage() });
 
 # Update environment to include [DY]LD_LIBRARY_PATH/LIBDIR/etc -

@@ -266,6 +268,8 @@ revert_replace_postgres();
 my $host = "localhost";
 my $user = "postgres";
 my $dbname = "postgres";
+my $realWorker1Host = "localhost";
+my $realWorker2Host = "localhost";
 
 # n.b. previously this was on port 57640, which caused issues because that's in the
 # ephemeral port range, it was sometimes in the TIME_WAIT state which prevented us from

@@ -351,6 +355,13 @@ else
 	}
 }
 
+if ($hoststr)
+{
+	my %hostvals = split /=|\s/, $hoststr;
+	$realWorker1Host = $hostvals{worker1host};
+	$realWorker2Host = $hostvals{worker2host};
+}
+
 my $followerCoordPort = 9070;
 my @followerWorkerPorts = ();
 for (my $workerIndex = 1; $workerIndex <= $workerCount; $workerIndex++) {
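
The parse above implies the hoststr format: whitespace-separated key=value pairs, split on both "=" and whitespace into a flat list that Perl then interprets as a hash. A minimal illustration with placeholder hosts:

	my %hostvals = split /=|\s/, "worker1host=10.0.0.1 worker2host=10.0.0.2";
	# yields (worker1host => '10.0.0.1', worker2host => '10.0.0.2')

Any keys other than worker1host and worker2host would land in the hash but are never read.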
@@ -542,6 +553,9 @@ for my $workeroff (0 .. $#workerHosts)
 	my $host = $workerHosts[$workeroff];
 	print $fh "--variable=worker_".($workeroff+1)."_host=\"$host\" ";
 }
+print $fh "--variable=real_master_host=\"$host\" ";
+print $fh "--variable=real_worker_1_host=\"$realWorker1Host\" ";
+print $fh "--variable=real_worker_2_host=\"$realWorker2Host\" ";
 for my $workeroff (0 .. $#followerWorkerPorts)
 {
 	my $port = $followerWorkerPorts[$workeroff];
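
These prints add the three variables to the same psql options the harness already emits for worker_N_host, which is presumably what makes :real_master_host and :real_worker_N_host resolvable inside the test scripts, e.g. --variable=real_worker_1_host="10.0.0.1". Note that real_master_host reuses the script-level $host (initialized to "localhost" earlier; the $host inside the preceding loop is a separate lexical), so this change only makes the worker hosts configurable via hoststr.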
@@ -14,7 +14,7 @@ CREATE TABLE stock (
 
 SELECT create_distributed_table('stock','s_w_id');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path = ch_bench_having;
 explain (costs false, summary false, timing false)
 select s_i_id, sum(s_order_cnt) as ordercount

@@ -122,7 +122,7 @@ having (select max(s_order_cnt) > 2 as having_query from stock where s_i_id =
 order by s_i_id;
 
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET citus.replication_model TO streaming;
 SET citus.shard_replication_factor to 1;
 SET citus.shard_count to 4;

@@ -158,7 +158,7 @@ insert into stock VALUES
 
 SELECT create_distributed_table('stock','s_w_id');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path = ch_bench_having, public;
 
 select s_i_id, sum(s_order_cnt) as ordercount

@@ -192,7 +192,7 @@ having sum(s_order_cnt) >
 and n_name = 'GERMANY')
 order by ordercount desc;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 BEGIN;
 SET LOCAL client_min_messages TO WARNING;
 DROP SCHEMA ch_bench_having CASCADE;
@@ -16,14 +16,14 @@ CREATE COLLATION german_phonebook_unpropagated (provider = icu, locale = 'de-u-c
 
 SET citus.enable_ddl_propagation TO on;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT c.collname, nsp.nspname, a.rolname
 FROM pg_collation c
 JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
 JOIN pg_authid a ON a.oid = c.collowner
 WHERE collname like 'german_phonebook%'
 ORDER BY 1,2,3;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path to collation_tests;
 
 CREATE TABLE test_propagate(id int, t1 text COLLATE german_phonebook,

@@ -55,27 +55,27 @@ INSERT INTO test_range VALUES (U&'\00E4sop', 1), (U&'Vo\1E9Er', 2);
 SET client_min_messages TO debug;
 SELECT * FROM test_range WHERE key > 'Ab' AND key < U&'\00E4z';
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT c.collname, nsp.nspname, a.rolname
 FROM pg_collation c
 JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
 JOIN pg_authid a ON a.oid = c.collowner
 WHERE collname like 'german_phonebook%'
 ORDER BY 1,2,3;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 ALTER COLLATION collation_tests.german_phonebook RENAME TO german_phonebook2;
 ALTER COLLATION collation_tests.german_phonebook2 SET SCHEMA collation_tests2;
 ALTER COLLATION collation_tests2.german_phonebook2 OWNER TO collationuser;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT c.collname, nsp.nspname, a.rolname
 FROM pg_collation c
 JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
 JOIN pg_authid a ON a.oid = c.collowner
 WHERE collname like 'german_phonebook%'
 ORDER BY 1,2,3;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA collation_tests CASCADE;

@@ -83,17 +83,17 @@ DROP SCHEMA collation_tests2 CASCADE;
 
 -- This is hacky, but we should clean-up the resources as below
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA collation_tests CASCADE;
 DROP SCHEMA collation_tests2 CASCADE;
 
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA collation_tests CASCADE;
 DROP SCHEMA collation_tests2 CASCADE;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP USER collationuser;
 SELECT run_command_on_workers($$DROP USER collationuser;$$);
@@ -1,7 +1,7 @@
 CREATE SCHEMA collation_conflict;
 SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$);
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO collation_conflict;
 
 CREATE COLLATION caseinsensitive (

@@ -9,7 +9,7 @@ CREATE COLLATION caseinsensitive (
 locale = 'und-u-ks-level2'
 );
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO collation_conflict;
 
 CREATE COLLATION caseinsensitive (

@@ -19,14 +19,14 @@ CREATE COLLATION caseinsensitive (
 CREATE TABLE tblcoll(val text COLLATE caseinsensitive);
 SELECT create_reference_table('tblcoll');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT c.collname, nsp.nspname, a.rolname
 FROM pg_collation c
 JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
 JOIN pg_authid a ON a.oid = c.collowner
 WHERE collname like 'caseinsensitive%'
 ORDER BY 1,2,3;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO collation_conflict;
 
 -- Now drop & recreate in order to make sure rename detects the existing renamed objects

@@ -35,7 +35,7 @@ SET search_path TO collation_conflict;
 DROP TABLE tblcoll;
 DROP COLLATION caseinsensitive;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO collation_conflict;
 
 CREATE COLLATION caseinsensitive (

@@ -43,7 +43,7 @@ CREATE COLLATION caseinsensitive (
 locale = 'und-u-ks-level1'
 );
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO collation_conflict;
 
 CREATE COLLATION caseinsensitive (

@@ -53,14 +53,14 @@ CREATE COLLATION caseinsensitive (
 CREATE TABLE tblcoll(val text COLLATE caseinsensitive);
 SELECT create_reference_table('tblcoll');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT c.collname, nsp.nspname, a.rolname
 FROM pg_collation c
 JOIN pg_namespace nsp ON nsp.oid = c.collnamespace
 JOIN pg_authid a ON a.oid = c.collowner
 WHERE collname like 'caseinsensitive%'
 ORDER BY 1,2,3;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO collation_conflict;
 
 -- now test worker_create_or_replace_object directly
@@ -474,20 +474,20 @@ DROP SCHEMA function_tests2 CASCADE;
 SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary';
 -- This is hacky, but we should clean-up the resources as below
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 UPDATE pg_dist_local_group SET groupid = 0;
 TRUNCATE pg_dist_node;
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA function_tests CASCADE;
 DROP SCHEMA function_tests2 CASCADE;
 SET search_path TO function_tests, function_tests2;
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 UPDATE pg_dist_local_group SET groupid = 0;
 TRUNCATE pg_dist_node;
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA function_tests CASCADE;
 DROP SCHEMA function_tests2 CASCADE;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP USER functionuser;
 SELECT run_command_on_workers($$DROP USER functionuser$$);
@@ -4,7 +4,7 @@
 CREATE SCHEMA proc_conflict;
 SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$);
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO proc_conflict;
 CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$
 BEGIN

@@ -16,7 +16,7 @@ CREATE AGGREGATE existing_agg(int) (
 STYPE = int
 );
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO proc_conflict;
 
 CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$

@@ -31,7 +31,7 @@ CREATE AGGREGATE existing_agg(int) (
 
 SELECT create_distributed_function('existing_agg(int)');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO proc_conflict;
 
 WITH data (val) AS (

@@ -41,7 +41,7 @@ WITH data (val) AS (
 )
 SELECT existing_agg(val) FROM data;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO proc_conflict;
 
 WITH data (val) AS (

@@ -57,7 +57,7 @@ SET client_min_messages TO error;
 DROP AGGREGATE existing_agg(int) CASCADE;
 DROP FUNCTION existing_func(int, int) CASCADE;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO proc_conflict;
 
 CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$

@@ -70,7 +70,7 @@ CREATE AGGREGATE existing_agg(int) (
 STYPE = int
 );
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO proc_conflict;
 
 CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$

@@ -85,7 +85,7 @@ CREATE AGGREGATE existing_agg(int) (
 
 SELECT create_distributed_function('existing_agg(int)');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO proc_conflict;
 
 WITH data (val) AS (

@@ -95,7 +95,7 @@ WITH data (val) AS (
 )
 SELECT existing_agg(val) FROM data;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO proc_conflict;
 
 WITH data (val) AS (
@@ -218,15 +218,15 @@ END;
 -- of result files in both nodes were same when calling read_intermediate_results()
 -- in the above UPDATE calls).
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT * FROM distributed_intermediate_results.colocated_with_target_4213604 ORDER BY key;
 SELECT * FROM distributed_intermediate_results.colocated_with_target_4213605 ORDER BY key;
 
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 SELECT * FROM distributed_intermediate_results.colocated_with_target_4213604 ORDER BY key;
 SELECT * FROM distributed_intermediate_results.colocated_with_target_4213605 ORDER BY key;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 SET search_path TO 'distributed_intermediate_results';
 DROP TABLE source_table, target_table, colocated_with_target, distributed_result_info;
@@ -5,13 +5,13 @@ SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$);
 
 -- create a type on a worker that should not cause data loss once overwritten with a type
 -- from the coordinator
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET citus.enable_ddl_propagation TO off;
 SET search_path TO type_conflict;
 CREATE TYPE my_precious_type AS (secret text, should bool);
 CREATE TABLE local_table (a int, b my_precious_type);
 INSERT INTO local_table VALUES (42, ('always bring a towel', true)::my_precious_type);
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO type_conflict;
 
 -- overwrite the type on the worker from the coordinator. The type should be over written

@@ -19,7 +19,7 @@ SET search_path TO type_conflict;
 CREATE TYPE my_precious_type AS (scatterd_secret text);
 
 -- verify the data is retained
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO type_conflict;
 -- show fields for table
 SELECT pg_class.relname,

@@ -34,7 +34,7 @@ ORDER BY attnum;
 
 SELECT * FROM local_table;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET search_path TO type_conflict;
 
 -- make sure worker_create_or_replace correctly generates new names while types are existing
@@ -28,49 +28,49 @@ SELECT create_distributed_table('dist_schema.dist_table', 'id');
 SELECT create_distributed_table('another_dist_schema.dist_table', 'id');
 
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema';
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- grant all permissions
 GRANT ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1, role_2, role_3 WITH GRANT OPTION;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- revoke all permissions
 REVOKE ALL ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role_1, role_2, role_3, PUBLIC CASCADE;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- grant with multiple permissions, roles and schemas
 GRANT USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema TO role_1, role_2, role_3;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- revoke with multiple permissions, roles and schemas
 REVOKE USAGE, CREATE ON SCHEMA dist_schema, another_dist_schema, non_dist_schema FROM role_1, role_2;
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- grant with grant option
 GRANT USAGE ON SCHEMA dist_schema TO role_1, role_3 WITH GRANT OPTION;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- revoke grant option for
 REVOKE GRANT OPTION FOR USAGE ON SCHEMA dist_schema FROM role_3;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- test current_user
 SET citus.enable_alter_role_propagation TO ON;

@@ -78,9 +78,9 @@ ALTER ROLE role_1 SUPERUSER;
 SET citus.enable_alter_role_propagation TO OFF;
 SET ROLE role_1;
 GRANT CREATE ON SCHEMA dist_schema TO CURRENT_USER;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname IN ('dist_schema', 'another_dist_schema', 'non_dist_schema') ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 RESET ROLE;
 SET citus.enable_alter_role_propagation TO ON;
 ALTER ROLE role_1 NOSUPERUSER;

@@ -117,27 +117,27 @@ SELECT create_distributed_table('grantor_schema.grantor_table', 'id');
 
 -- check if the grantors are propagated correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- add the previously removed node
 SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
 
 -- check if the grantors are propagated correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- revoke one of the permissions
 REVOKE USAGE ON SCHEMA grantor_schema FROM role_1 CASCADE;
 
 -- check if revoke worked correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- test if grantor propagates correctly on already distributed schemas
 GRANT USAGE ON SCHEMA grantor_schema TO role_1 WITH GRANT OPTION;

@@ -150,9 +150,9 @@ RESET ROLE;
 
 -- check the results
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP TABLE grantor_schema.grantor_table;
 SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grantor_schema CASCADE');

@@ -168,9 +168,9 @@ CREATE TABLE dist_schema.dist_table (id int);
 SELECT create_distributed_table('dist_schema.dist_table', 'id');
 
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP TABLE dist_schema.dist_table;
 SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE');

@@ -192,27 +192,27 @@ RESET ROLE;
 
 -- check if the grants are propagated correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- add the previously removed node
 SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
 
 -- check if the grants are propagated correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- revoke those new permissions
 REVOKE CREATE, USAGE ON SCHEMA PUBLIC FROM role_1 CASCADE;
 
 -- check if the grants are propagated correctly
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'public' ORDER BY nspname;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP TABLE public_schema_table;
 
@@ -37,7 +37,7 @@ CREATE TABLE collections_list_0
 FOR VALUES IN ( 0 );
 
 -- connection worker and get ready for the tests
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SET search_path TO local_shard_execution;
 
 -- returns true of the distribution key filter

@@ -772,7 +772,7 @@ WHERE distributed_table.key = 1
 RESET client_min_messages;
 RESET citus.log_local_commands;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET citus.next_shard_id TO 1480000;
 -- local execution with custom type
 SET citus.replication_model TO "streaming";

@@ -818,7 +818,7 @@ CALL register_for_event(16, 1, 'yes');
 CALL register_for_event(16, 1, 'yes');
 CALL register_for_event(16, 1, 'yes');
 
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
 CALL register_for_event(16, 1, 'yes');
 CALL register_for_event(16, 1, 'yes');
 CALL register_for_event(16, 1, 'yes');

@@ -875,7 +875,7 @@ INSERT INTO event_responses VALUES (16, 666, 'maybe'), (17, 777, 'no')
 ON CONFLICT (event_id, user_id)
 DO UPDATE SET response = EXCLUDED.response RETURNING *;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 SET client_min_messages TO ERROR;
 SET search_path TO public;
@@ -233,7 +233,7 @@ SELECT * FROM
 (SELECT key FROM table_2 GROUP BY key HAVING max(value) > (SELECT * FROM cte_2) LIMIT 1) as bar
 WHERE foo.key != bar.key;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 
 -- now use the same queries on a worker
 SET search_path TO locally_execute_intermediate_results;

@@ -701,7 +701,7 @@ SELECT * FROM
 (SELECT key FROM table_2 GROUP BY key HAVING max(value) > (SELECT * FROM cte_2) LIMIT 1) as bar
 WHERE foo.key != bar.key;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 SET client_min_messages TO ERROR;
 DROP SCHEMA locally_execute_intermediate_results CASCADE;
@@ -406,11 +406,11 @@ ROLLBACK;
 
 -- There should be no constraint on master and worker(s)
 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 
 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Tests to check the effect of rollback
 BEGIN;

@@ -423,11 +423,11 @@ ROLLBACK;
 -- There should be no constraint on master and worker(s)
 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 
 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP TABLE products;
 
@@ -158,20 +158,20 @@ DROP TABLE cluster_management_test;
 SELECT master_remove_node(:'worker_2_host', :worker_2_port);
 SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
 SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SELECT master_remove_node(:'worker_2_host', :worker_2_port);
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- check that added nodes are not propagated to nodes without metadata
 SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
 SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- check that removing two nodes in the same transaction works
 SELECT

@@ -203,9 +203,9 @@ COMMIT;
 
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
 SELECT 1 FROM master_add_node(:'worker_1_host', :worker_1_port);

@@ -236,12 +236,12 @@ WHERE
 
 DROP TABLE temp;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 DELETE FROM pg_dist_partition;
 DELETE FROM pg_dist_shard;
 DELETE FROM pg_dist_placement;
 DELETE FROM pg_dist_node;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
 SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
 
@@ -286,11 +286,11 @@ SELECT create_distributed_table('table_failing', 'id', colocate_with => NULL);
 CREATE TABLE table_bigint ( id bigint );
 SELECT create_distributed_table('table_bigint', 'id', colocate_with => 'table1_groupE');
 -- check worker table schemas
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300062'::regclass;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_colocation.table4_groupE_1300064'::regclass;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET citus.next_shard_id TO 1300080;
 
 CREATE TABLE table1_groupF ( id int );
@@ -290,9 +290,9 @@ DROP TABLE data_load_test1, data_load_test2;
 END;
 
 -- There should be no table on the worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%';
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- creating an index after loading data works
 BEGIN;

@@ -348,9 +348,9 @@ CREATE UNLOGGED TABLE unlogged_table
 SELECT create_distributed_table('unlogged_table', 'key');
 SELECT * FROM master_get_table_ddl_events('unlogged_table');
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%';
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Test rollback of create table
 BEGIN;

@@ -359,9 +359,9 @@ SELECT create_distributed_table('rollback_table','id');
 ROLLBACK;
 
 -- Table should not exist on the worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Insert 3 rows to make sure that copy after shard creation touches the same
 -- worker node twice.

@@ -374,9 +374,9 @@ SELECT create_distributed_table('rollback_table','id');
 ROLLBACK;
 
 -- Table should not exist on the worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 BEGIN;
 CREATE TABLE rollback_table(id int, name varchar(20));

@@ -404,9 +404,9 @@ SELECT create_distributed_table('rollback_table','id');
 ROLLBACK;
 
 -- Table should not exist on the worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%');
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 BEGIN;
 CREATE TABLE tt1(id int);

@@ -418,10 +418,10 @@ INSERT INTO tt2 SELECT * FROM tt1 WHERE id = 1;
 COMMIT;
 
 -- Table should exist on the worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360069'::regclass;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360073'::regclass;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP TABLE tt1;
 DROP TABLE tt2;

@@ -435,14 +435,14 @@ SELECT master_create_empty_shard('append_tt1');
 ROLLBACK;
 
 -- Table exists on the worker node.
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360077'::regclass;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- There should be no table on the worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%');
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Queries executing with router executor is allowed in the same transaction
 -- with create_distributed_table

@@ -455,9 +455,9 @@ SELECT * FROM tt1 WHERE id = 1;
 COMMIT;
 
 -- Placements should be created on the worker
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360078'::regclass;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 DROP TABLE tt1;
 

@@ -468,9 +468,9 @@ DROP TABLE tt1;
 COMMIT;
 
 -- There should be no table on the worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%');
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Tests with create_distributed_table & DDL & DML commands
 
@@ -224,11 +224,11 @@ CREATE TABLE check_example
 other_other_col integer CHECK (abs(other_other_col) >= 100)
 );
 SELECT create_distributed_table('check_example', 'partition_col', 'hash');
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
     relid = 'check_example_partition_col_key_365056'::regclass;
 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365056'::regclass;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- Index-based constraints are created with shard-extended names, but others
 -- (e.g. expression-based table CHECK constraints) do _not_ have shardids in
@@ -58,7 +58,7 @@ BEGIN;
 
 SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id();
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id();
 
@@ -195,11 +195,11 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 2);
 SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3);
 
 -- verify result of the get_shard_id_for_distribution_column
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT * FROM get_shardid_test_table1_540006;
 SELECT * FROM get_shardid_test_table1_540009;
 SELECT * FROM get_shardid_test_table1_540007;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- test non-existing value
 SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4);

@@ -216,10 +216,10 @@ SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{a, b, c
 SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f}');
 
 -- verify result of the get_shard_id_for_distribution_column
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 SELECT * FROM get_shardid_test_table2_540013;
 SELECT * FROM get_shardid_test_table2_540011;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- test mismatching data type
 SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', 'a');
@@ -192,7 +192,7 @@ DROP EXTENSION citus;
 CREATE EXTENSION citus;
 
 -- test cache invalidation in workers
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 
 DROP EXTENSION citus;
 SET citus.enable_version_checks TO 'false';

@@ -204,7 +204,7 @@ ALTER EXTENSION citus UPDATE;
 -- if cache is invalidated succesfull, this \d should work without any problem
 \d
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 -- check that maintenance daemon gets (re-)started for the right user
 DROP EXTENSION citus;

@@ -300,20 +300,20 @@ DROP SCHEMA test_deamon CASCADE;
 
 -- create a test database, configure citus with single node
 CREATE DATABASE another;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 CREATE DATABASE another;
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 \c another
 CREATE EXTENSION citus;
 SELECT FROM master_add_node(:'worker_1_host', :worker_1_port);
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 CREATE EXTENSION citus;
 ALTER FUNCTION assign_distributed_transaction_id(initiator_node_identifier integer, transaction_number bigint, transaction_stamp timestamp with time zone)
 RENAME TO dummy_assign_function;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 SET citus.shard_replication_factor to 1;
 -- create_distributed_table command should fail
 CREATE TABLE t1(a int, b int);

@@ -329,9 +329,9 @@ END;
 $$;
 
 \c regression
-\c - - - :master_port
+\c - - :real_master_host :master_port
 DROP DATABASE another;
 
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
 DROP DATABASE another;
 
@@ -1,4 +1,4 @@
-\c - - - :master_port
+\c - - :real_master_host :master_port
 
 CREATE TABLE the_table (a int, b int, z bigserial);
 SELECT create_distributed_table('the_table', 'a');

@@ -76,5 +76,5 @@ SET citus.writable_standby_coordinator TO on;
 INSERT INTO the_table (a, b, z) VALUES (1, 2, 3);
 SELECT * FROM the_table ORDER BY a;
 
-\c - - - :master_port
+\c - - :real_master_host :master_port
 DROP TABLE the_table;
@@ -1,4 +1,4 @@
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- do some setup

@@ -46,7 +46,7 @@ SELECT * FROM the_table;

-- add the secondary nodes and try again, the SELECT statement should work this time

-\c - - - :master_port
+\c - - :real_master_host :master_port

SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port,
groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port),

@@ -91,12 +91,12 @@ order by s_i_id;
-- now move the secondary nodes into the new cluster and see that the follower, finally
-- correctly configured, can run select queries involving them

-\c - - - :master_port
+\c - - :real_master_host :master_port
UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary';
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'"
SELECT * FROM the_table;

-- clean up after ourselves
-\c - - - :master_port
+\c - - :real_master_host :master_port
DROP TABLE the_table;
DROP TABLE stock;

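Note that \c also accepts a single libpq connection string, which is how the follower test above injects GUCs at connection time; a sketch along the lines of the test (port and settings are illustrative):

    -- connect through a connection string so that options can set GUCs for the session
    \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'"
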
@@ -1,4 +1,4 @@
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- do some setup

@@ -17,6 +17,6 @@ SELECT * FROM tab;

-- clean up

-\c - - - :master_port
+\c - - :real_master_host :master_port

DROP TABLE tab;

@@ -105,12 +105,12 @@ SELECT create_distributed_table('foreign_table', 'id');
ALTER FOREIGN TABLE foreign_table rename to renamed_foreign_table;
ALTER FOREIGN TABLE renamed_foreign_table rename full_name to rename_name;
ALTER FOREIGN TABLE renamed_foreign_table alter rename_name type char(8);
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
select table_name, column_name, data_type
from information_schema.columns
where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id'
order by table_name;
-\c - - - :master_port
+\c - - :real_master_host :master_port

SELECT master_get_table_ddl_events('renamed_foreign_table');

@@ -122,12 +122,12 @@ SELECT master_get_table_ddl_events('local_view');
-- clean up
DROP VIEW IF EXISTS local_view;
DROP FOREIGN TABLE IF EXISTS renamed_foreign_table;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
select table_name, column_name, data_type
from information_schema.columns
where table_schema='public' and table_name like 'renamed_foreign_table_%' and column_name <> 'id'
order by table_name;
-\c - - - :master_port
+\c - - :real_master_host :master_port
DROP TABLE IF EXISTS simple_table, not_null_table, column_constraint_table,
table_constraint_table, default_value_table, pkey_table,
unique_table, clustered_table, fiddly_table;

@@ -79,12 +79,12 @@ DROP TABLE local_table;

-- Verify that all indexes got created on the master node and one of the workers
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1);
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- Verify that we error out on unsupported statement types

@@ -156,14 +156,14 @@ DROP INDEX CONCURRENTLY lineitem_concurrently_index;
-- As there's a primary key, exclude those from this check.
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;

-- create index that will conflict with master operations
CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b);

-\c - - - :master_port
+\c - - :real_master_host :master_port

-- should fail because worker index already exists
CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);

@@ -176,12 +176,12 @@ DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx;
CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b);
SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

-- now drop shard index to test partial master DROP failure
DROP INDEX CONCURRENTLY ith_b_idx_102089;

-\c - - - :master_port
+\c - - :real_master_host :master_port
DROP INDEX CONCURRENTLY ith_b_idx;

-- the failure results in an INVALID index

@@ -83,7 +83,7 @@ SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;

-- Check that the metadata has been copied to the worker
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition ORDER BY logicalrelid;

@@ -102,7 +102,7 @@ SELECT * FROM pg_dist_colocation ORDER BY colocationid;
SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;

-- Make sure that start_metadata_sync_to_node considers foreign key constraints
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- Since we're superuser, we can set the replication model to 'streaming' to
-- create some MX tables

@@ -121,10 +121,10 @@ SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

-- Check that foreign key metadata exists on the worker
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;

-\c - - - :master_port
+\c - - :real_master_host :master_port
DROP TABLE mx_testing_schema_2.fk_test_2;
DROP TABLE mx_testing_schema.fk_test_1;

@@ -132,10 +132,10 @@ RESET citus.shard_replication_factor;
RESET citus.replication_model;

-- Check that repeated calls to start_metadata_sync_to_node have no side effects
-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition ORDER BY logicalrelid;

@@ -149,7 +149,7 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;

-- Make sure that start_metadata_sync_to_node cannot be called inside a transaction
-\c - - - :master_port
+\c - - :real_master_host :master_port
BEGIN;
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
ROLLBACK;

@@ -157,7 +157,7 @@ ROLLBACK;
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;

-- Check that the distributed table can be queried from the worker
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

@@ -173,19 +173,19 @@ INSERT INTO mx_query_test VALUES (3, 'three', 9);
INSERT INTO mx_query_test VALUES (4, 'four', 16);
INSERT INTO mx_query_test VALUES (5, 'five', 24);

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT * FROM mx_query_test ORDER BY a;
INSERT INTO mx_query_test VALUES (6, 'six', 36);
UPDATE mx_query_test SET c = 25 WHERE a = 5;

-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT * FROM mx_query_test ORDER BY a;

-\c - - - :master_port
+\c - - :real_master_host :master_port
DROP TABLE mx_query_test;

-- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false
-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port;
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

@@ -246,7 +246,7 @@ ORDER BY
logicalrelid, shardid;

-- Check that metadata of MX tables exists on the metadata worker
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

-- Check that tables are created
\dt mx_test_schema_?.mx_table_?

@@ -272,7 +272,7 @@ ORDER BY
logicalrelid, shardid;

-- Check that metadata of MX tables doesn't exist on the non-metadata worker
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

\d mx_test_schema_1.mx_table_1
\d mx_test_schema_2.mx_table_2

@@ -282,27 +282,27 @@ SELECT * FROM pg_dist_shard;
SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport;

-- Check that CREATE INDEX statement is propagated
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.multi_shard_commit_protocol TO '2pc';
SET client_min_messages TO 'ERROR';
CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1);
ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1);
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_test_schema_2.mx_index_3'::regclass;
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_test_schema_2.mx_table_2_col1_key'::regclass;

-- Check that DROP INDEX statement is propagated
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.multi_shard_commit_protocol TO '2pc';
DROP INDEX mx_test_schema_2.mx_index_3;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_test_schema_2.mx_index_3'::regclass;

-- Check that ALTER TABLE statements are propagated
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.multi_shard_commit_protocol TO '2pc';
ALTER TABLE mx_test_schema_1.mx_table_1 ADD COLUMN col3 NUMERIC;
ALTER TABLE mx_test_schema_1.mx_table_1 ALTER COLUMN col3 SET DATA TYPE INT;

@@ -314,12 +314,12 @@ FOREIGN KEY
(col1)
REFERENCES
mx_test_schema_2.mx_table_2(col1);
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass;
SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;

-- Check that foreign key constraint with NOT VALID works as well
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.multi_shard_commit_protocol TO '2pc';
ALTER TABLE mx_test_schema_1.mx_table_1 DROP CONSTRAINT mx_fk_constraint;
ALTER TABLE

@@ -331,11 +331,11 @@ FOREIGN KEY
REFERENCES
mx_test_schema_2.mx_table_2(col1)
NOT VALID;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass;

-- Check that mark_tables_colocated call propagates the changes to the workers
-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
SET citus.shard_count TO 7;

@@ -384,7 +384,7 @@ FROM
WHERE
logicalrelid = 'mx_colocation_test_1'::regclass
OR logicalrelid = 'mx_colocation_test_2'::regclass;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT
logicalrelid, colocationid
FROM

@@ -393,7 +393,7 @@ WHERE
logicalrelid = 'mx_colocation_test_1'::regclass
OR logicalrelid = 'mx_colocation_test_2'::regclass;

-\c - - - :master_port
+\c - - :real_master_host :master_port

-- Check that DROP TABLE on MX tables works
DROP TABLE mx_colocation_test_1;

@@ -401,12 +401,12 @@ DROP TABLE mx_colocation_test_2;
\d mx_colocation_test_1
\d mx_colocation_test_2

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
\d mx_colocation_test_1
\d mx_colocation_test_2

-- Check that dropped MX table can be recreated again
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.shard_count TO 7;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';

@@ -424,7 +424,7 @@ SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_te
DROP TABLE mx_temp_drop_test;

-- Check that MX tables can be created with SERIAL columns
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.shard_count TO 3;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';

@@ -443,10 +443,10 @@ CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL);
SELECT create_distributed_table('mx_table_with_small_sequence', 'a');
INSERT INTO mx_table_with_small_sequence VALUES (0);

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
INSERT INTO mx_table_with_small_sequence VALUES (1), (3);

-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';

@@ -458,7 +458,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_
\ds mx_table_with_sequence_c_seq

-- Check that the sequences are created on the metadata worker as well
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
\ds mx_table_with_sequence_b_seq
\ds mx_table_with_sequence_c_seq

@@ -468,10 +468,10 @@ SELECT nextval('mx_table_with_sequence_b_seq');
SELECT nextval('mx_table_with_sequence_c_seq');

-- Check that adding a new metadata node sets the sequence space correctly
-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
SELECT groupid FROM pg_dist_local_group;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass;
\ds mx_table_with_sequence_b_seq

@@ -482,7 +482,7 @@ SELECT nextval('mx_table_with_sequence_c_seq');
INSERT INTO mx_table_with_small_sequence VALUES (2), (4);

-- Check that dropping the mx table with sequences works as expected
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- check our small sequence values
SELECT a, b, c FROM mx_table_with_small_sequence ORDER BY a,b,c;

@@ -494,18 +494,18 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence;
\ds mx_table_with_sequence_c_seq

-- Check that the sequences are dropped from the workers
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
\d mx_table_with_sequence
\ds mx_table_with_sequence_b_seq
\ds mx_table_with_sequence_c_seq

-- Check that the sequences are dropped from the workers
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
\ds mx_table_with_sequence_b_seq
\ds mx_table_with_sequence_c_seq

-- Check that MX sequences play well with non-super users
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- Remove a node so that shards and sequences won't be created on table creation. Therefore,
-- we can test that start_metadata_sync_to_node can actually create the sequence with proper

@@ -519,9 +519,9 @@ SELECT master_remove_node(:'worker_2_host', :worker_2_port);

-- the master user needs superuser permissions to change the replication model
CREATE USER mx_user WITH SUPERUSER;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
CREATE USER mx_user;
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
CREATE USER mx_user;

\c - mx_user - :master_port

@@ -559,26 +559,26 @@ DROP TABLE pg_dist_partition_temp;
UPDATE pg_dist_placement
SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
WHERE groupid = :old_worker_2_group;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
UPDATE pg_dist_placement
SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
WHERE groupid = :old_worker_2_group;
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
UPDATE pg_dist_placement
SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
WHERE groupid = :old_worker_2_group;

-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);

DROP USER mx_user;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
DROP USER mx_user;
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
DROP USER mx_user;

-- Check that create_reference_table creates the metadata on workers
-\c - - - :master_port
+\c - - :real_master_host :master_port
CREATE TABLE mx_ref (col_1 int, col_2 text);
SELECT create_reference_table('mx_ref');

@@ -588,7 +588,7 @@ SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0;

\dt mx_ref

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
\dt mx_ref
SELECT
logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport

@@ -604,7 +604,7 @@ ORDER BY
SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset

-- Check that DDL commands are propagated to reference tables on workers
-\c - - - :master_port
+\c - - :real_master_host :master_port
ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0;
CREATE INDEX mx_ref_index ON mx_ref(col_1);
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;

@@ -612,25 +612,25 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_ref_index'::regclass;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass;
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_ref_index'::regclass;

-- Check that metadata is cleaned successfully upon drop table
-\c - - - :master_port
+\c - - :real_master_host :master_port
DROP TABLE mx_ref;
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_ref_index'::regclass;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'mx_ref_index'::regclass;
SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid;
SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid;

-- Check that master_add_node propagates the metadata about new placements of a reference table
-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT groupid AS old_worker_2_group
FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
CREATE TABLE tmp_placement AS

@@ -645,12 +645,12 @@ SELECT shardid, nodename, nodeport
FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT shardid, nodename, nodeport
FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass;

-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT master_add_node(:'worker_2_host', :worker_2_port);

SELECT shardid, nodename, nodeport

@@ -658,14 +658,14 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT shardid, nodename, nodeport
FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport;

-- Get the metadata back into a consistent state
-\c - - - :master_port
+\c - - :real_master_host :master_port
INSERT INTO pg_dist_placement (SELECT * FROM tmp_placement);
DROP TABLE tmp_placement;

@@ -673,20 +673,20 @@ UPDATE pg_dist_placement
SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
WHERE groupid = :old_worker_2_group;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
UPDATE pg_dist_placement
SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port)
WHERE groupid = :old_worker_2_group;

-- Confirm that shouldhaveshards is 'true'
-\c - - - :master_port
+\c - - :real_master_host :master_port
select shouldhaveshards from pg_dist_node where nodeport = 8888;
\c - postgres - :worker_1_port
select shouldhaveshards from pg_dist_node where nodeport = 8888;

-- Check that setting shouldhaveshards to false is correctly transferred to other mx nodes
-\c - - - :master_port
+\c - - :real_master_host :master_port
SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false);
select shouldhaveshards from pg_dist_node where nodeport = 8888;

@@ -701,7 +701,7 @@ select shouldhaveshards from pg_dist_node where nodeport = 8888;
\c - postgres - :worker_1_port
select shouldhaveshards from pg_dist_node where nodeport = 8888;

-\c - - - :master_port
+\c - - :real_master_host :master_port
--
-- Check that metadata commands error out if any nodes are out-of-sync
--

@@ -243,13 +243,13 @@ INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell',
-- Test that shards which miss a modification are marked unhealthy

-- First: Connect to the second worker node
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

-- Second: Move aside limit_orders shard on the second worker node
ALTER TABLE limit_orders_750000 RENAME TO renamed_orders;

-- Third: Connect back to master node
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- Fourth: Perform an INSERT on the remaining node
-- the whole transaction should fail

@@ -257,20 +257,20 @@ ALTER TABLE limit_orders_750000 RENAME TO renamed_orders;
INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67);

-- set the shard name back
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

-- Second: Move aside limit_orders shard on the second worker node
ALTER TABLE renamed_orders RENAME TO limit_orders_750000;

-- Verify the insert failed and both placements are healthy
-- or the insert succeeded and placement marked unhealthy
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT count(*) FROM limit_orders_750000 WHERE id = 276;

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
SELECT count(*) FROM limit_orders_750000 WHERE id = 276;

-\c - - - :master_port
+\c - - :real_master_host :master_port

SELECT count(*) FROM limit_orders WHERE id = 276;

@@ -284,13 +284,13 @@ AND s.logicalrelid = 'limit_orders'::regclass;
-- Test that if all shards miss a modification, no state change occurs

-- First: Connect to the first worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

-- Second: Move aside limit_orders shard on the first worker node
ALTER TABLE limit_orders_750000 RENAME TO renamed_orders;

-- Third: Connect back to master node
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- Fourth: Perform an INSERT on the remaining node
\set VERBOSITY terse

@@ -310,13 +310,13 @@ AND s.logicalrelid = 'limit_orders'::regclass;
-- Undo our change...

-- First: Connect to the first worker node
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

-- Second: Move aside limit_orders shard on the first worker node
ALTER TABLE renamed_orders RENAME TO limit_orders_750000;

-- Third: Connect back to master node
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- attempting to change the partition key is unsupported
UPDATE limit_orders SET id = 0 WHERE id = 246;

@@ -363,15 +363,15 @@ UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246;
CREATE FUNCTION immutable_append(old_values int[], new_value int)
RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
CREATE FUNCTION immutable_append(old_values int[], new_value int)
RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
CREATE FUNCTION immutable_append(old_values int[], new_value int)
RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE;

-\c - - - :master_port
+\c - - :real_master_host :master_port

-- immutable function calls with vars are also allowed
UPDATE limit_orders

@@ -392,7 +392,7 @@ AND sp.shardstate = 1
AND s.logicalrelid = 'objects'::regclass;

-- create trigger on one worker to reject certain values
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$
BEGIN

@@ -409,7 +409,7 @@ AFTER INSERT ON objects_1200003
DEFERRABLE INITIALLY IMMEDIATE
FOR EACH ROW EXECUTE PROCEDURE reject_bad();

-\c - - - :master_port
+\c - - :real_master_host :master_port

-- test partial failure; worker_1 succeeds, 2 fails
-- in this case, we expect the transaction to abort

@@ -437,7 +437,7 @@ DELETE FROM objects;

-- there cannot be errors on different shards at different times
-- because the first failure will fail the whole transaction
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$
BEGIN
IF (NEW.name = 'BAD') THEN

@@ -453,7 +453,7 @@ AFTER INSERT ON labs_1200002
DEFERRABLE INITIALLY IMMEDIATE
FOR EACH ROW EXECUTE PROCEDURE reject_bad();

-\c - - - :master_port
+\c - - :real_master_host :master_port

BEGIN;
INSERT INTO objects VALUES (1, 'apple');

@@ -476,7 +476,7 @@ AND (s.logicalrelid = 'objects'::regclass OR
s.logicalrelid = 'labs'::regclass);

-- what if the failures happen at COMMIT time?
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

DROP TRIGGER reject_bad ON objects_1200003;

@@ -485,7 +485,7 @@ AFTER INSERT ON objects_1200003
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE reject_bad();

-\c - - - :master_port
+\c - - :real_master_host :master_port

-- should be the same story as before, just at COMMIT time
BEGIN;

@@ -516,7 +516,7 @@ WHERE sp.shardid = s.shardid
AND s.logicalrelid = 'objects'::regclass;

-- what if all nodes have failures at COMMIT time?
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

DROP TRIGGER reject_bad ON labs_1200002;

@@ -525,7 +525,7 @@ AFTER INSERT ON labs_1200002
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE reject_bad();

-\c - - - :master_port
+\c - - :real_master_host :master_port

BEGIN;
INSERT INTO objects VALUES (1, 'apple');

@@ -548,11 +548,11 @@ AND (s.logicalrelid = 'objects'::regclass OR
s.logicalrelid = 'labs'::regclass);

-- what if one shard (objects) succeeds but another (labs) completely fails?
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

DROP TRIGGER reject_bad ON objects_1200003;

-\c - - - :master_port
+\c - - :real_master_host :master_port
SET citus.next_shard_id TO 1200004;
BEGIN;
INSERT INTO objects VALUES (1, 'apple');

@@ -644,7 +644,7 @@ ROLLBACK;
SELECT * FROM reference_modifying_xacts;

-- let's fail one of the workers before the commit time
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$
BEGIN

@@ -661,7 +661,7 @@ AFTER INSERT ON reference_modifying_xacts_1200006
DEFERRABLE INITIALLY IMMEDIATE
FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference();

-\c - - - :master_port
+\c - - :real_master_host :master_port
\set VERBOSITY terse
-- try without wrapping inside a transaction
INSERT INTO reference_modifying_xacts VALUES (999, 3);

@@ -672,7 +672,7 @@ INSERT INTO reference_modifying_xacts VALUES (999, 3);
COMMIT;

-- let's fail one of the workers at COMMIT time
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006;

CREATE CONSTRAINT TRIGGER reject_bad_reference

@@ -680,7 +680,7 @@ AFTER INSERT ON reference_modifying_xacts_1200006
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference();

-\c - - - :master_port
+\c - - :real_master_host :master_port
\set VERBOSITY terse

-- try without wrapping inside a transaction

@@ -701,11 +701,11 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate;

-- for the time being, drop the constraint
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006;

-\c - - - :master_port
+\c - - :real_master_host :master_port

-- now create a hash distributed table and run tests
-- including both the reference table and the hash

@@ -738,7 +738,7 @@ INSERT INTO hash_modifying_xacts VALUES (2, 2);
ABORT;

-- let's fail one of the workers before COMMIT time for the hash table
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$
BEGIN

@@ -755,7 +755,7 @@ AFTER INSERT ON hash_modifying_xacts_1200007
DEFERRABLE INITIALLY IMMEDIATE
FOR EACH ROW EXECUTE PROCEDURE reject_bad_hash();

-\c - - - :master_port
+\c - - :real_master_host :master_port
\set VERBOSITY terse

-- the transaction as a whole should fail

@@ -769,7 +769,7 @@ SELECT * FROM reference_modifying_xacts WHERE key = 55;

-- now let's fail one of the workers for the hash distributed table
-- when there is a reference table involved
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
DROP TRIGGER reject_bad_hash ON hash_modifying_xacts_1200007;

-- the trigger is on execution time

@@ -778,7 +778,7 @@ AFTER INSERT ON hash_modifying_xacts_1200007
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE reject_bad_hash();

-\c - - - :master_port
+\c - - :real_master_host :master_port
\set VERBOSITY terse

-- the transaction as a whole should fail

@@ -804,14 +804,14 @@ ORDER BY s.logicalrelid, sp.shardstate;
-- and ensure that hash distributed table's
-- change is rolled back as well

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

CREATE CONSTRAINT TRIGGER reject_bad_reference
AFTER INSERT ON reference_modifying_xacts_1200006
DEFERRABLE INITIALLY IMMEDIATE
FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference();

-\c - - - :master_port
+\c - - :real_master_host :master_port
\set VERBOSITY terse

BEGIN;

@@ -881,9 +881,9 @@ SELECT count(*) FROM pg_dist_transaction;

-- first create the new user on all nodes
CREATE USER test_user;
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
CREATE USER test_user;
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
CREATE USER test_user;

-- now connect back to the master with the new user

@@ -39,7 +39,7 @@ GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access;

SET citus.enable_ddl_propagation TO DEFAULT;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
CREATE USER full_access;
CREATE USER usage_access;
CREATE USER read_access;

@@ -60,7 +60,7 @@ GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
GRANT ALL ON SCHEMA full_access_user_schema TO full_access;
GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access;

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
CREATE USER full_access;
CREATE USER usage_access;
CREATE USER read_access;

@@ -81,7 +81,7 @@ GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
GRANT ALL ON SCHEMA full_access_user_schema TO full_access;
GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access;

-\c - - - :master_port
+\c - - :real_master_host :master_port

SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;

@@ -320,7 +320,7 @@ CREATE FUNCTION usage_access_func_third(key int, variadic v int[]) RETURNS text
LANGUAGE plpgsql AS 'begin return current_user; end;';

-- connect back as super user
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- show that the current user is a super user
SELECT usesuper FROM pg_user where usename IN (SELECT current_user);

@@ -406,14 +406,14 @@ SELECT worker_cleanup_job_schema_cache();
RESET ROLE;

-- to test access to files created during repartition we will create some on worker 1
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET ROLE full_access;
SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]);
RESET ROLE;

-- all attempts for transfer are initiated from other workers

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
-- super user should not be able to copy files created by a user
SELECT worker_fetch_partition_file(42, 1, 1, 1, :'worker_1_host', :worker_1_port);

@@ -498,7 +498,7 @@ SELECT count(*) FROM pg_merge_job_0042.task_000001;
DROP TABLE pg_merge_job_0042.task_000001, pg_merge_job_0042.task_000001_merge; -- drop table so we can reuse the same files for more tests
RESET ROLE;

-\c - - - :master_port
+\c - - :real_master_host :master_port

SELECT run_command_on_workers($$SELECT task_tracker_cleanup_job(42);$$);

@@ -24,13 +24,13 @@ CREATE TABLE ref(a int);
SELECT create_reference_table('ref');

-- alter role from mx worker isn't propagated
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET citus.enable_alter_role_propagation TO ON;
ALTER ROLE reprefuser WITH CREATEROLE;
select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET search_path TO mx_add_coordinator,public;
SET client_min_messages TO WARNING;
select rolcreatedb, rolcreaterole from pg_roles where rolname = 'reprefuser';

@@ -56,7 +56,7 @@ INSERT INTO ref VALUES (1);
TRUNCATE ref;

-- test that changes from a metadata node are reflected in the coordinator placement
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO mx_add_coordinator,public;
INSERT INTO ref VALUES (1), (2), (3);
UPDATE ref SET a = a + 1;

@@ -69,7 +69,7 @@ INSERT INTO local_table VALUES (2), (4);
SELECT r.a FROM ref r JOIN local_table lt on r.a = lt.a;

-\c - - - :master_port
+\c - - :real_master_host :master_port
SET search_path TO mx_add_coordinator,public;
SELECT * FROM ref ORDER BY a;

@@ -174,7 +174,7 @@ select start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
-- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make
-- worker backend caches inconsistent. Reconnect to coordinator to use
-- new worker connections, hence new backends.
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET search_path to multi_mx_call, public;
SET client_min_messages TO DEBUG1;

@@ -57,7 +57,7 @@ CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text);
CREATE TYPE order_side_mx AS ENUM ('buy', 'sell');

-- now create required stuff on worker 1
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

-- create schema to test schema support
CREATE SCHEMA citus_mx_test_schema_join_1;

@@ -101,7 +101,7 @@ CREATE OPERATOR citus_mx_test_schema.=== (
);

-- now create required stuff on worker 2
-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

-- create schema to test schema support
CREATE SCHEMA citus_mx_test_schema_join_1;

@@ -146,7 +146,7 @@ CREATE OPERATOR citus_mx_test_schema.=== (
);

-- connect back to the master, and do some more tests
-\c - - - :master_port
+\c - - :real_master_host :master_port

SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;

@@ -22,7 +22,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table':
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'ddl_test%_index';

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

-- make sure we don't break the following tests by hiding the shard names
SET citus.override_table_visibility TO FALSE;

@@ -34,7 +34,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'ddl_test%_index_1220088';

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

-- make sure we don't break the following tests by hiding the shard names
SET citus.override_table_visibility TO FALSE;

@@ -50,7 +50,7 @@ INSERT INTO mx_ddl_table VALUES (37, 78, 2);
INSERT INTO mx_ddl_table VALUES (38, 78);

-- Switch to the coordinator
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- SET DATA TYPE

@@ -58,11 +58,11 @@ ALTER TABLE mx_ddl_table ALTER COLUMN version SET DATA TYPE double precision;

INSERT INTO mx_ddl_table VALUES (78, 83, 2.1);

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SELECT * FROM mx_ddl_table ORDER BY key;

-- Switch to the coordinator
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- DROP INDEX
DROP INDEX ddl_test_index;

@@ -83,14 +83,14 @@ ALTER TABLE mx_ddl_table DROP COLUMN version;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
\di ddl_test*_index

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
\di ddl_test*_index
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass;
\di ddl_test*_index_1220088

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass;
\di ddl_test*_index

@@ -98,7 +98,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1
\di ddl_test*_index_1220089

-- Show that DDL commands are done within a two-phase commit transaction
-\c - - - :master_port
+\c - - :real_master_host :master_port

CREATE INDEX ddl_test_index ON mx_ddl_table(value);

@@ -111,15 +111,15 @@ SET citus.replication_model TO streaming;
CREATE TABLE mx_sequence(key INT, value BIGSERIAL);
SELECT create_distributed_table('mx_sequence', 'key');

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

SELECT last_value AS worker_1_lastval FROM mx_sequence_value_seq \gset

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

SELECT last_value AS worker_2_lastval FROM mx_sequence_value_seq \gset

-\c - - - :master_port
+\c - - :real_master_host :master_port

-- don't look at the actual values because they rely on the groupids of the nodes
-- which can change depending on the tests which have run before this one

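The sequence checks above rely on psql's \gset, which sends the current query buffer and stores each output column into a psql variable named after the column. A small sketch (the sequence name is taken from the test; the variable name is illustrative):

    -- store last_value into the psql variable :seq_lastval, then print it
    SELECT last_value AS seq_lastval FROM mx_sequence_value_seq \gset
    \echo :seq_lastval
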
@@ -3,9 +3,9 @@
--

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
-\c - - - :worker_1_port
-\c - - - :worker_2_port
-\c - - - :master_port
+\c - - :real_worker_1_host :worker_1_port
+\c - - :real_worker_2_host :worker_2_port
+\c - - :real_master_host :master_port

\a\t

@@ -17,7 +17,7 @@ VACUUM ANALYZE orders_mx;
VACUUM ANALYZE customer_mx;
VACUUM ANALYZE supplier_mx;

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)
RETURNS jsonb

@@ -42,7 +42,7 @@ BEGIN
END;
$BODY$ LANGUAGE plpgsql;

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)
RETURNS jsonb

@@ -83,7 +83,7 @@ SELECT true AS valid FROM explain_json($$
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$);

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port

-- Test XML format
EXPLAIN (COSTS FALSE, FORMAT XML)

@@ -105,7 +105,7 @@ EXPLAIN (COSTS FALSE, FORMAT TEXT)
SELECT l_quantity, count(*) count_quantity FROM lineitem_mx
GROUP BY l_quantity ORDER BY count_quantity, l_quantity;

-\c - - - :worker_2_port
+\c - - :real_worker_2_host :worker_2_port

-- Test verbose
EXPLAIN (COSTS FALSE, VERBOSE TRUE)

@@ -223,7 +223,7 @@ select start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
-- stop_metadata_sync_to_node()/start_metadata_sync_to_node() might make
-- worker backend caches inconsistent. Reconnect to coordinator to use
-- new worker connections, hence new backends.
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET search_path to multi_mx_function_call_delegation, public;
SET client_min_messages TO DEBUG1;
SET citus.replication_model = 'streaming';

@@ -38,7 +38,7 @@ SELECT * FROM citus_shard_indexes_on_worker;

-- now show that we see the shards, but not the
-- indexes as there are no indexes
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

@@ -57,13 +57,13 @@ SELECT
1));

-- now create an index
-\c - - - :master_port
+\c - - :real_master_host :master_port
SET search_path TO 'mx_hide_shard_names';
CREATE INDEX test_index ON mx_hide_shard_names.test_table(id);

-- now show that we see the shards, and the
-- indexes as well
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

@@ -77,7 +77,7 @@ SELECT pg_table_is_visible('test_table_1130000'::regclass);
SET citus.override_table_visibility TO FALSE;
SELECT pg_table_is_visible('test_table_1130000'::regclass);

-\c - - - :master_port
+\c - - :real_master_host :master_port
-- make sure that we're resilient to the edge cases
-- such as when the table name includes the shard number
SET search_path TO 'mx_hide_shard_names';

@@ -90,7 +90,7 @@ SET citus.replication_model TO 'streaming';
CREATE TABLE test_table_102008(id int, time date);
SELECT create_distributed_table('test_table_102008', 'id');

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'mx_hide_shard_names';

-- existing shard ids appended to a local table name

@@ -103,7 +103,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2;

\d

-\c - - - :master_port
+\c - - :real_master_host :master_port
-- make sure that we don't mess up with schemas
CREATE SCHEMA mx_hide_shard_names_2;
SET search_path TO 'mx_hide_shard_names_2';

@@ -115,7 +115,7 @@ CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
CREATE INDEX test_index ON mx_hide_shard_names_2.test_table(id);

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'mx_hide_shard_names';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

@@ -127,7 +127,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

-- now try very long table names
-\c - - - :master_port
+\c - - :real_master_host :master_port

SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;

@@ -143,7 +143,7 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col2 integer not null);
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1');

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'mx_hide_shard_names_3';
SELECT * FROM citus_shards_on_worker ORDER BY 2;
\d

@@ -151,7 +151,7 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2;

-- now try weird schema names
-\c - - - :master_port
+\c - - :real_master_host :master_port

SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;

@@ -167,7 +167,7 @@ CREATE INDEX "MyTenantIndex" ON "CiTuS.TeeN"."TeeNTabLE.1!?!"("TeNANt_Id");
-- create distributed table with weird names
SELECT create_distributed_table('"CiTuS.TeeN"."TeeNTabLE.1!?!"', 'TeNANt_Id');

-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO "CiTuS.TeeN";
SELECT * FROM citus_shards_on_worker ORDER BY 2;
SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;

@@ -176,7 +176,7 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
\di

-- clean-up
-\c - - - :master_port
+\c - - :real_master_host :master_port

-- show that common psql functions do not show shards
-- including the ones that are not in the current schema

@@ -36,12 +36,12 @@ TRUNCATE target_table;
--
-- Test repartitioned INSERT/SELECT from MX worker
--
-\c - - - :worker_1_port
+\c - - :real_worker_1_host :worker_1_port
SET search_path TO multi_mx_insert_select_repartition;
EXPLAIN (costs off) INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;
INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a;

-\c - - - :master_port
+\c - - :real_master_host :master_port
SET search_path TO multi_mx_insert_select_repartition;
SELECT * FROM target_table ORDER BY a;

@ -44,7 +44,7 @@ SELECT recover_prepared_transactions();
|
|||
-- Verify that the commit records have been removed
|
||||
SELECT count(*) FROM pg_dist_transaction;
|
||||
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass;
|
||||
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
|
||||
|
@ -58,7 +58,7 @@ WHERE logicalrelid = 'distributed_mx_table'::regclass;
|
|||
SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
|
||||
WHERE logicalrelid = 'distributed_mx_table'::regclass;
|
||||
|
||||
\c - - - :worker_2_port
|
||||
\c - - :real_worker_2_host :worker_2_port
|
||||
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass;
|
||||
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
|
||||
|
@ -73,7 +73,7 @@ SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
|
|||
WHERE logicalrelid = 'distributed_mx_table'::regclass;
|
||||
|
||||
-- Create a table and then roll back the transaction
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.replication_model TO streaming;
|
||||
|
||||
|
@ -86,11 +86,11 @@ SELECT create_distributed_table('should_not_exist', 'key');
|
|||
ABORT;
|
||||
|
||||
-- Verify that the table does not exist on the worker
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist';
|
||||
|
||||
-- Ensure that we don't allow prepare on a metadata transaction
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.replication_model TO streaming;
|
||||
|
||||
|
@ -118,14 +118,14 @@ SELECT create_distributed_table('objects_for_xacts', 'id');
|
|||
COMMIT;
|
||||
|
||||
-- see that the table actually created and distributed
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
SELECT repmodel FROM pg_dist_partition
|
||||
WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass;
|
||||
|
||||
SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
|
||||
WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass;
|
||||
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
SET citus.shard_replication_factor TO 1;
|
||||
SET citus.replication_model TO streaming;
|
||||
|
||||
|
@ -147,7 +147,7 @@ ROLLBACK;
|
|||
-- show that the table not exists on the coordinator
|
||||
SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts';
|
||||
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
|
||||
-- the distributed table not exists on the worker node
|
||||
SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts';
|
||||
|
@ -161,7 +161,7 @@ SELECT master_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::reg
|
|||
SELECT recover_prepared_transactions();
|
||||
|
||||
-- Create some "fake" prepared transactions to recover
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
|
||||
BEGIN;
|
||||
CREATE TABLE should_abort (value int);
|
||||
|
@ -175,7 +175,7 @@ BEGIN;
|
|||
CREATE TABLE should_be_sorted_into_middle (value int);
|
||||
PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle';
|
||||
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
-- Add "fake" pg_dist_transaction records and run recovery
|
||||
SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset
|
||||
INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_commit');

@@ -185,11 +185,11 @@ SELECT recover_prepared_transactions();
SELECT count(*) FROM pg_dist_transaction;

-- Confirm that transactions were correctly rolled forward
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort';
SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit';
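-- Prepared transactions that were neither committed nor rolled back stay
-- visible in pg_prepared_xacts; a quick way to inspect what recovery left
-- behind (a sketch, not part of the test's expected output):
--   SELECT gid FROM pg_prepared_xacts ORDER BY gid;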

\c - - - :master_port
\c - - :real_master_host :master_port

CREATE USER no_access_mx;
SELECT run_command_on_workers($$CREATE USER no_access_mx;$$);
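-- run_command_on_workers() executes the given statement on every worker and
-- returns one (nodename, nodeport, success, result) row per node, which is
-- why a single call suffices to create the user cluster-wide, e.g.:
--   SELECT * FROM run_command_on_workers('SELECT current_user');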

@@ -12,13 +12,13 @@ INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54',
SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743;

-- now single-row INSERT from a worker
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',
20.69);
SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744;

-- now single-row INSERT to the other worker
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
\set VERBOSITY terse

INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy',

@@ -84,7 +84,7 @@ INSERT INTO limit_orders_mx VALUES (2037, 'GOOG', 5634, now(), 'buy', random()),
(2039, 'GOOG', 5634, now(), 'buy', random());

-- connect back to the other node
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- commands containing a CTE are supported
WITH deleted_orders AS (DELETE FROM limit_orders_mx WHERE id < 0 RETURNING *)

@@ -184,11 +184,11 @@ SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246;
UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol;

-- connect to the coordinator to run the DDL
\c - - - :master_port
\c - - :real_master_host :master_port
ALTER TABLE limit_orders_mx ADD COLUMN array_of_values integer[];

-- connect back to the other node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- updates referencing STABLE functions are allowed
UPDATE limit_orders_mx SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246;

@@ -196,7 +196,7 @@ UPDATE limit_orders_mx SET placed_at = LEAST(placed_at, now()::timestamp) WHERE
UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246;

-- connect back to the other node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- immutable function calls with vars are also allowed
UPDATE limit_orders_mx

@@ -220,11 +220,11 @@ UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246;
SELECT array_of_values FROM limit_orders_mx WHERE id = 246;

-- connect to the coordinator to run the DDL
\c - - - :master_port
\c - - :real_master_host :master_port
ALTER TABLE limit_orders_mx DROP array_of_values;

-- connect back to the other node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- even in RETURNING
UPDATE limit_orders_mx SET placed_at = placed_at WHERE id = 246 RETURNING NOW();

@@ -26,7 +26,7 @@ CREATE TABLE test_table_1(id int, value_1 int);
SELECT create_distributed_table('test_table_1', 'id');
INSERT INTO test_table_1 VALUES(5,5),(6,6);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'mx_modify_reference_table';

-- Simple DML operations from the first worker node

@@ -58,7 +58,7 @@ INSERT INTO ref_table_2 SELECT * FROM ref_table;
SELECT SUM(value_1) FROM ref_table_2;

-- Now connect to the second worker and observe the results as well
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
SET search_path TO 'mx_modify_reference_table';

SELECT SUM(value_1) FROM ref_table;

@@ -85,7 +85,7 @@ SELECT SUM(value_1) FROM ref_table;
INSERT INTO ref_table_2 SELECT * FROM ref_table;
SELECT SUM(value_1) FROM ref_table_2;

\c - - - :master_port
\c - - :real_master_host :master_port

SET search_path TO 'public';
DROP SCHEMA mx_modify_reference_table CASCADE;

@@ -20,7 +20,7 @@ COMMIT;
SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;

-- do it on the worker node as well
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
BEGIN;
DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1');

@@ -28,7 +28,7 @@ COMMIT;
SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;

-- do it on the other worker node as well
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
BEGIN;
DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2;
INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2');

@@ -36,7 +36,7 @@ COMMIT;
SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2;

\c - - - :master_port
\c - - :real_master_host :master_port

-- abort a modification
BEGIN;

@@ -45,7 +45,7 @@ ABORT;

SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- abort a modification on the worker node
BEGIN;

@@ -54,7 +54,7 @@ ABORT;

SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- abort a modification on the other worker node
BEGIN;

@@ -65,7 +65,7 @@ SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1;

-- switch back to the first worker node
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- creating savepoints should work...
BEGIN;

@@ -126,7 +126,7 @@ INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport');
COMMIT;

-- have the same test on the other worker node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
-- should be valid to edit labs_mx after researchers_mx...
BEGIN;
INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');

@@ -143,7 +143,7 @@ INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport');
COMMIT;

-- switch back to the worker node
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- this logic doesn't apply to router SELECTs occurring after a modification:
-- selecting from the modified node is fine...

@@ -171,7 +171,7 @@ SELECT name FROM labs_mx WHERE id = 10;
INSERT INTO labs_mx VALUES (6, 'Bell labs_mx');
COMMIT;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
-- test primary key violations
BEGIN;
INSERT INTO objects_mx VALUES (1, 'apple');

@@ -182,7 +182,7 @@ COMMIT;
SELECT * FROM objects_mx WHERE id = 1;

-- same test on the second worker node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
-- test primary key violations
BEGIN;
INSERT INTO objects_mx VALUES (1, 'apple');

@@ -193,7 +193,7 @@ COMMIT;
SELECT * FROM objects_mx WHERE id = 1;

-- create trigger on one worker to reject certain values
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$
BEGIN
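    -- The hunk cuts the function body off here; a sketch of the complete
    -- rejecting trigger function, in the spirit of this test (the exact
    -- condition in the repository may differ):
    --     IF (NEW.name = 'BAD') THEN
    --         RAISE 'illegal value';
    --     END IF;
    --     RETURN NEW;
    -- END;
    -- $rb$ LANGUAGE plpgsql;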

@@ -222,7 +222,7 @@ SELECT * FROM objects_mx WHERE id = 2;
SELECT * FROM labs_mx WHERE id = 7;

-- same failure test from worker 2
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- test partial failure; statement 1 succeeds, statement 2 fails
BEGIN;

@@ -234,10 +234,10 @@ COMMIT;
SELECT * FROM objects_mx WHERE id = 2;
SELECT * FROM labs_mx WHERE id = 7;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- what if there are errors on different shards at different times?
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

CREATE CONSTRAINT TRIGGER reject_bad_mx
AFTER INSERT ON labs_mx_1220102
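-- The declaration is truncated by the hunk; a constraint trigger that
-- defers its check to COMMIT (matching the "failures happen at COMMIT time"
-- scenario below) would be completed roughly as:
--     DEFERRABLE INITIALLY DEFERRED
--     FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx();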

@@ -257,7 +257,7 @@ SELECT * FROM objects_mx WHERE id = 1;
SELECT * FROM labs_mx WHERE id = 8;

-- same test from the other worker
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

BEGIN;

@@ -273,7 +273,7 @@ SELECT * FROM labs_mx WHERE id = 8;

-- what if the failures happen at COMMIT time?
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

DROP TRIGGER reject_bad_mx ON objects_mx_1220103;

@@ -315,7 +315,7 @@ SELECT * FROM objects_mx WHERE id = 1;
SELECT * FROM labs_mx WHERE id = 8;

-- what if one shard (objects_mx) succeeds but another (labs_mx) completely fails?
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

DROP TRIGGER reject_bad_mx ON objects_mx_1220103;

@@ -30,7 +30,7 @@ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03');
SELECT create_distributed_table('partitioning_test', 'id');

-- see from MX node, the data is loaded to shards
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT * FROM partitioning_test ORDER BY 1;

@@ -55,7 +55,7 @@ ORDER BY
-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;

\c - - - :master_port
\c - - :real_master_host :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;

@@ -63,7 +63,7 @@ SET citus.shard_replication_factor TO 1;
CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01');

-- see from MX node, new partition is automatically distributed as well
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT
logicalrelid

@@ -85,7 +85,7 @@ ORDER BY
-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;

\c - - - :master_port
\c - - :real_master_host :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;

@@ -99,7 +99,7 @@ INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07');
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01');

-- see from MX node, attached partition is distributed as well
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT
logicalrelid

@@ -124,7 +124,7 @@ SELECT * FROM partitioning_test ORDER BY 1;
-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;

\c - - - :master_port
\c - - :real_master_host :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;

@@ -139,27 +139,27 @@ INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07');
ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01');

-- see from MX node that the data is loaded to shards
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT * FROM partitioning_test ORDER BY 1;

-- see from MX node, partitioning hierarchy is built
SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;

\c - - - :master_port
\c - - :real_master_host :master_port

-- 5-) Detaching partition of the partitioned table
ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009;

-- see from MX node, partitioning hierarchy is built
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'::regclass ORDER BY 1;

-- make sure DROPping from worker node is not allowed
DROP TABLE partitioning_test;

\c - - - :master_port
\c - - :real_master_host :master_port

-- make sure we can repeatedly call start_metadata_sync_to_node
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

@@ -1,6 +1,6 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;

\c - - - :master_port
\c - - :real_master_host :master_port
CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
SELECT create_reference_table('reference_table_test');

@@ -17,7 +17,7 @@ BEGIN;
SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE;
END;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- SELECT .. FOR UPDATE should work on first worker (takes lock on self)
SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE;

@@ -232,11 +232,11 @@ SELECT * FROM (
ORDER BY value_1;

-- to make the aggregation tests more interesting, ingest some more data
\c - - - :master_port
\c - - :real_master_host :master_port
INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');
INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02');
INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- some aggregations
SELECT

@@ -336,7 +336,7 @@ CREATE TEMP TABLE temp_reference_test as
FROM reference_table_test
WHERE value_1 = 1;

\c - - - :master_port
\c - - :real_master_host :master_port
-- all kinds of joins are supported among reference tables
-- first create two more tables
CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp);

@@ -353,7 +353,7 @@ INSERT INTO reference_table_test_second VALUES (3, 3.0, '3', '2016-12-03');
INSERT INTO reference_table_test_third VALUES (4, 4.0, '4', '2016-12-04');
INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05');

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- SELECT .. FOR UPDATE should work on second worker (takes lock on first worker)
SELECT value_1, value_2 FROM reference_table_test ORDER BY value_1, value_2 LIMIT 1 FOR UPDATE;

@@ -419,9 +419,9 @@ ORDER BY
1;

-- ingest a common row to see more meaningful results with joins involving 3 tables
\c - - - :master_port
\c - - :real_master_host :master_port
INSERT INTO reference_table_test_third VALUES (3, 3.0, '3', '2016-12-03');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT
DISTINCT t1.value_1

@@ -468,7 +468,7 @@ FROM
ORDER BY
1;

\c - - - :master_port
\c - - :real_master_host :master_port
SET citus.shard_count TO 6;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;

@@ -489,7 +489,7 @@ INSERT INTO colocated_table_test VALUES (2, 2.0, '2', '2016-12-02');
INSERT INTO colocated_table_test_2 VALUES (1, 1.0, '1', '2016-12-01');
INSERT INTO colocated_table_test_2 VALUES (2, 2.0, '2', '2016-12-02');

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET client_min_messages TO DEBUG1;
SET citus.log_multi_join_order TO TRUE;

@@ -558,5 +558,5 @@ SET client_min_messages TO NOTICE;
SET citus.log_multi_join_order TO FALSE;

-- clean up tables
\c - - - :master_port
\c - - :real_master_host :master_port
DROP TABLE reference_table_test, reference_table_test_second, reference_table_test_third;

@@ -2,7 +2,7 @@
-- This test runs the below query from the :worker_1_port and the
-- concurrent test runs the same query on :worker_2_port. Note that both
-- tests use the same sequence ids but the queries should not fail.
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SET citus.task_executor_type TO "task-tracker";
CREATE TEMP TABLE t1 AS

@@ -2,7 +2,7 @@
-- This test runs the below query from the :worker_2_port and the
-- concurrent test runs the same query on :worker_1_port. Note that both
-- tests use the same sequence ids but the queries should not fail.
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

SET citus.task_executor_type TO "task-tracker";
CREATE TEMP TABLE t1 AS

@@ -65,7 +65,7 @@ CREATE TABLE repartition_udt_other (
-- proceed with type creation as above; thus the OIDs will be different.
-- so that the OID is off.

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- START type creation
-- ... as well as a function to use as its comparator...

@@ -109,7 +109,7 @@ FUNCTION 1 test_udt_hash(test_udt);

-- END type creation
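-- The elided block defines the UDT plus the equality operator and hash
-- operator class that let it serve as a join/repartition column; sketched
-- with a hypothetical class name (only test_udt_hash appears in the hunks):
--   CREATE OPERATOR CLASS test_udt_hash_ops
--   DEFAULT FOR TYPE test_udt USING hash AS
--   OPERATOR 1 = (test_udt, test_udt),
--   FUNCTION 1 test_udt_hash(test_udt);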

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- START type creation
-- ... as well as a function to use as its comparator...

@@ -155,7 +155,7 @@ FUNCTION 1 test_udt_hash(test_udt);

-- Connect to master

\c - - - :master_port
\c - - :real_master_host :master_port

-- Distribute and populate the two tables.
SET citus.shard_replication_factor TO 1;

@@ -201,5 +201,5 @@ SELECT * FROM repartition_udt JOIN repartition_udt_other
WHERE repartition_udt.pk > 1
ORDER BY repartition_udt.pk;

\c - - - :worker_1_port
\c - - - :worker_2_port
\c - - :real_worker_1_host :worker_1_port
\c - - :real_worker_2_host :worker_2_port

@@ -2,7 +2,7 @@
-- MULTI_MX_REPARTITION_W1_UDT
--

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET client_min_messages = LOG;
-- Query that should result in a repartition join on UDT column.
SET citus.task_executor_type = 'task-tracker';

@@ -2,7 +2,7 @@
-- MULTI_MX_REPARTITION_W2_UDT
--

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
SET client_min_messages = LOG;
-- Query that should result in a repartition join on UDT column.
SET citus.task_executor_type = 'task-tracker';

@@ -10,7 +10,7 @@
-- and CTE inlining is not relevant to router planning anyway
SET citus.enable_cte_inlining TO false;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
-- this table is used in a CTE test
CREATE TABLE authors_hash_mx ( name text, id bigint );
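-- With citus.enable_cte_inlining off, a CTE over this table is materialized
-- rather than inlined; a router-plannable shape (filtering on a single
-- distribution-key value) would look roughly like:
--   WITH a AS (SELECT id, name FROM authors_hash_mx WHERE id = 1)
--   SELECT count(*) FROM a;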

@@ -3,7 +3,7 @@
--

-- connect to a worker node and run some queries
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- test very basic queries
SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4;

@@ -223,7 +223,7 @@ SET citus.task_executor_type TO "adaptive";
-- connect to the master and do some tests
-- regarding DDL support on schemas where
-- the search_path is set
\c - - - :master_port
\c - - :real_master_host :master_port

CREATE SCHEMA mx_ddl_schema_1;
CREATE SCHEMA mx_ddl_schema_2;

@@ -311,26 +311,26 @@ CREATE SCHEMA mx_new_schema;
SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Table's Schema" FROM information_schema.tables WHERE table_name='table_set_schema';
SELECT table_schema AS "Shards' Schema"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%'
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port

ALTER TABLE mx_old_schema.table_set_schema SET SCHEMA mx_new_schema;

SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Table's Schema" FROM information_schema.tables WHERE table_name='table_set_schema';
SELECT table_schema AS "Shards' Schema"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%'
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port
SELECT * FROM mx_new_schema.table_set_schema;

DROP SCHEMA mx_old_schema CASCADE;

@@ -4,7 +4,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

-- Query #1 from the TPC-H decision support benchmark

@@ -31,7 +31,7 @@ ORDER BY
l_linestatus;

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Query #1 from the TPC-H decision support benchmark

@@ -58,7 +58,7 @@ ORDER BY
l_linestatus;

-- connect to the other node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- Query #1 from the TPC-H decision support benchmark

@@ -6,7 +6,7 @@

-- connect to master
\c - - - :master_port
\c - - :real_master_host :master_port

SELECT
c_custkey,

@@ -43,7 +43,7 @@ LIMIT 20;

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT
c_custkey,

@@ -79,7 +79,7 @@ ORDER BY
LIMIT 20;

-- connect to the other worker
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

SELECT
c_custkey,

@@ -4,7 +4,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

-- Query #12 from the TPC-H decision support benchmark

@@ -38,7 +38,7 @@ ORDER BY
l_shipmode;

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Query #12 from the TPC-H decision support benchmark

@@ -72,7 +72,7 @@ ORDER BY
l_shipmode;

-- connect to the other worker node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- Query #12 from the TPC-H decision support benchmark

@@ -4,7 +4,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

-- Query #14 from the TPC-H decision support benchmark

@@ -23,7 +23,7 @@ WHERE
AND l_shipdate < date '1995-09-01' + interval '1' year;

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Query #14 from the TPC-H decision support benchmark

@@ -42,7 +42,7 @@ WHERE
AND l_shipdate < date '1995-09-01' + interval '1' year;

-- connect to the other node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- Query #14 from the TPC-H decision support benchmark

@@ -4,7 +4,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

-- Query #19 from the TPC-H decision support benchmark. Note that we modified
-- the query from its original to make it work on smaller data sets.

@@ -40,7 +40,7 @@ WHERE
);

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Query #19 from the TPC-H decision support benchmark. Note that we modified
-- the query from its original to make it work on smaller data sets.

@@ -76,7 +76,7 @@ WHERE
);

-- connect to the other node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- Query #19 from the TPC-H decision support benchmark. Note that we modified
-- the query from its original to make it work on smaller data sets.

@@ -6,7 +6,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

SELECT
l_orderkey,

@@ -32,7 +32,7 @@ ORDER BY
o_orderdate;

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT
l_orderkey,

@@ -58,7 +58,7 @@ ORDER BY
o_orderdate;

-- connect to the other node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

SELECT
l_orderkey,

@@ -4,7 +4,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

-- Query #6 from the TPC-H decision support benchmark

@@ -19,7 +19,7 @@ WHERE
and l_quantity < 24;

-- connect to one of the worker nodes
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Query #6 from the TPC-H decision support benchmark

@@ -34,7 +34,7 @@ WHERE
and l_quantity < 24;

-- connect to the other worker node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- Query #6 from the TPC-H decision support benchmark

@@ -4,7 +4,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

-- Query #7 from the TPC-H decision support benchmark

@@ -49,7 +49,7 @@ ORDER BY
l_year;

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Query #7 from the TPC-H decision support benchmark

@@ -94,7 +94,7 @@ ORDER BY
l_year;

-- connect to the other worker node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- Query #7 from the TPC-H decision support benchmark

@@ -4,7 +4,7 @@

-- connect to the coordinator
\c - - - :master_port
\c - - :real_master_host :master_port

-- Query #7 from the TPC-H benchmark; modified to include sub-selects

@@ -58,7 +58,7 @@ ORDER BY
l_year;

-- connect to one of the workers
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Query #7 from the TPC-H benchmark; modified to include sub-selects

@@ -112,7 +112,7 @@ ORDER BY
l_year;

-- connect to the other worker node
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- Query #7 from the TPC-H benchmark; modified to include sub-selects

@@ -7,7 +7,7 @@ SET citus.replication_model TO streaming;
CREATE TABLE test_recovery (x text);
SELECT create_distributed_table('test_recovery', 'x');

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- Disable auto-recovery for the initial tests
ALTER SYSTEM SET citus.recover_2pc_interval TO -1;
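-- ALTER SYSTEM only writes the new value to postgresql.auto.conf; it takes
-- effect once the configuration is reloaded, which is why the test follows
-- up with:
--   SELECT pg_reload_conf();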

@@ -108,7 +108,7 @@ SELECT pg_reload_conf();

DROP TABLE table_should_commit;

\c - - - :master_port
\c - - :real_master_host :master_port

DROP TABLE test_recovery_ref;
DROP TABLE test_recovery;

@@ -43,7 +43,7 @@ BEGIN;
ROLLBACK;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'truncate_from_workers';

-- make sure that TRUNCATE works as expected from the worker node

@@ -76,11 +76,11 @@ BEGIN;
ROLLBACK;

-- fill some data for the next test
\c - - - :master_port
\c - - :real_master_host :master_port
SET search_path TO 'truncate_from_workers';
INSERT INTO "refer'ence_table" SELECT i FROM generate_series(0, 100) i;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET search_path TO 'truncate_from_workers';

-- make sure that DMLs and SELECTs work fine alongside TRUNCATE on the worker

@@ -93,7 +93,7 @@ ROLLBACK;

RESET client_min_messages;

\c - - - :master_port
\c - - :real_master_host :master_port

-- also test the infrastructure that is used for supporting
-- TRUNCATE from worker nodes

@@ -14,9 +14,9 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt too_long_*
\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;

@@ -52,18 +52,18 @@ ALTER TABLE name_lengths ADD UNIQUE (float_col_123456789012345678901234567890123
ALTER TABLE name_lengths ADD EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =);
ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345678901234567890 > '2014-01-01'::date);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC;
\c - - - :master_port
\c - - :real_master_host :master_port

-- Placeholders for unsupported add constraints with EXPLICIT names that are too long
ALTER TABLE name_lengths ADD CONSTRAINT nl_unique_12345678901234567890123456789012345678901234567890 UNIQUE (float_col_12345678901234567890123456789012345678901234567890);
ALTER TABLE name_lengths ADD CONSTRAINT nl_exclude_12345678901234567890123456789012345678901234567890 EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =);
ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass ORDER BY 1 DESC, 2 DESC;
\c - - - :master_port
\c - - :real_master_host :master_port

-- Placeholders for RENAME operations
\set VERBOSITY TERSE

@@ -76,19 +76,19 @@ ALTER TABLE name_lengths RENAME CONSTRAINT unique_123456789012345678901234567890

CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
\c - - - :master_port
\c - - :real_master_host :master_port

-- Verify that a new index name > 63 characters is auto-truncated
-- by the parser/rewriter before further processing, just as in Postgres.
CREATE INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 ON name_lengths(col2);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "relname", "Column", "Type", "Definition" FROM index_attrs WHERE
relname LIKE 'tmp_idx_%' ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC;
\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;

@@ -116,10 +116,10 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_n
SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\di public.sneaky*225006
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass ORDER BY 1 DESC, 2 DESC;
\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;

@@ -135,9 +135,9 @@ CREATE TABLE sneaky_name_lengths (
);
SELECT create_distributed_table('sneaky_name_lengths', 'col1', 'hash');

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\di unique*225008
\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;

@@ -151,9 +151,9 @@ CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col2 integer not null);
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt *225000000000*
\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;

@@ -171,10 +171,10 @@ SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B
FROM pg_dist_shard
WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass;
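-- shard_name() (exercised above) appends the shard id to the relation name
-- and truncates the result so it still fits PostgreSQL's 63-byte identifier
-- limit; a sketch with a hypothetical table and shard id:
--   SELECT shard_name('lineitem'::regclass, 102008);  -- => lineitem_102008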

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt public.elephant_*
\di public.elephant_*
\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 2;

@@ -581,11 +581,11 @@ DROP TABLE http_request;

-- first create helper function
CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$;
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$;
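-- immutable_bleat is deliberately mislabeled IMMUTABLE while raising a
-- NOTICE as a side effect, so the NOTICEs reveal where and how often an
-- expression gets evaluated (coordinator vs. shard, once vs. per row), e.g.:
--   SELECT immutable_bleat('evaluated here');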
\c - - - :master_port
\c - - :real_master_host :master_port

-- test table
CREATE TABLE test_table (test_id integer NOT NULL, data text);

@@ -254,7 +254,7 @@ SELECT run_command_on_workers('GRANT ALL ON SCHEMA multi_real_time_transaction T
SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA multi_real_time_transaction TO rls_user');

-- create a policy on one worker to reject access if the GUC is not set
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET search_path = 'multi_real_time_transaction';

ALTER TABLE test_table_1610000 ENABLE ROW LEVEL SECURITY;

@@ -262,7 +262,7 @@ ALTER TABLE test_table_1610000 ENABLE ROW LEVEL SECURITY;
CREATE POLICY hide_by_default ON test_table_1610000 TO PUBLIC
USING (COALESCE(current_setting('app.show_rows', TRUE)::bool, FALSE));
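-- current_setting(..., TRUE) returns NULL instead of erroring when the GUC
-- has never been set, so the COALESCE makes rows invisible by default; a
-- session opts in with something like:
--   SET app.show_rows TO 'true';   -- policy now evaluates to TRUE
--   RESET app.show_rows;           -- back to hidden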

\c - - - :master_port
\c - - :real_master_host :master_port
SET ROLE rls_user;
SET search_path = 'multi_real_time_transaction';

@@ -303,14 +303,14 @@ SET ROLE rls_user;
SET search_path = 'multi_real_time_transaction';
SELECT * FROM co_test_table ORDER BY id, col_1;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET search_path = 'multi_real_time_transaction';

-- shard 1610004 contains data from tenant id 1
SELECT * FROM co_test_table_1610004 ORDER BY id, col_1;
SELECT * FROM co_test_table_1610006 ORDER BY id, col_1;

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
SET search_path = 'multi_real_time_transaction';

-- shard 1610005 contains data from tenant id 3

@@ -318,7 +318,7 @@ SELECT * FROM co_test_table_1610005 ORDER BY id, col_1;
-- shard 1610007 contains data from tenant id 2
SELECT * FROM co_test_table_1610007 ORDER BY id, col_1;

\c - - - :master_port
\c - - :real_master_host :master_port
SET search_path = 'multi_real_time_transaction';

-- Let's set up a policy on the coordinator and workers which filters the tenants.
@ -878,16 +878,16 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
|
|||
relid = 'reference_schema.reference_index_2'::regclass;
|
||||
|
||||
-- also to the shard placements
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass;
|
||||
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
|
||||
relid = 'reference_schema.reference_index_2_1250019'::regclass;
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
DROP INDEX reference_schema.reference_index_2;
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='reference_schema.reference_table_ddl_1250019'::regclass;
|
||||
\di reference_schema.reference_index_2*
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
|
||||
-- now test the renaming of the table, and back to the expected name
|
||||
ALTER TABLE reference_schema.reference_table_ddl RENAME TO reference_table_ddl_test;
|
||||
|
|
|

@@ -71,7 +71,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -82,7 +82,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

SELECT master_remove_node(:'worker_2_host', :worker_2_port);

@@ -103,7 +103,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -114,7 +114,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

-- remove same node twice
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

@@ -148,7 +148,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -159,7 +159,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

BEGIN;
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

@@ -182,7 +182,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -193,7 +193,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

-- remove node in a transaction and COMMIT

@@ -214,7 +214,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -225,7 +225,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

BEGIN;
SELECT master_remove_node(:'worker_2_host', :worker_2_port);

@@ -248,7 +248,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -259,7 +259,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

-- re-add the node for next tests
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

@@ -283,7 +283,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -294,7 +294,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

BEGIN;
INSERT INTO remove_node_reference_table VALUES(1);

@@ -321,7 +321,7 @@ WHERE colocationid IN
--verify the data is inserted
SELECT * FROM remove_node_reference_table;

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -334,7 +334,7 @@ WHERE

SELECT * FROM remove_node_reference_table;

\c - - - :master_port
\c - - :real_master_host :master_port

-- re-add the node for next tests
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

@@ -359,7 +359,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -370,7 +370,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

BEGIN;
ALTER TABLE remove_node_reference_table ADD column2 int;

@@ -394,7 +394,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -405,7 +405,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.next_shard_id TO 1380001;

@@ -483,7 +483,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -495,7 +495,7 @@ WHERE
nodeport = :worker_2_port
ORDER BY
shardid;
\c - - - :master_port
\c - - :real_master_host :master_port

SELECT master_remove_node(:'worker_2_host', :worker_2_port);

@@ -516,7 +516,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -527,7 +527,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

-- re-add the node for next tests
SELECT 1 FROM master_add_node(:'worker_2_host', :worker_2_port);

@@ -554,7 +554,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -566,7 +566,7 @@ WHERE
nodeport = :worker_2_port
ORDER BY shardid ASC;

\c - - - :master_port
\c - - :real_master_host :master_port

SELECT master_disable_node(:'worker_2_host', :worker_2_port);

@@ -587,7 +587,7 @@ WHERE colocationid IN
FROM pg_dist_partition
WHERE logicalrelid = 'remove_node_reference_table'::regclass);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;

@@ -598,7 +598,7 @@ FROM
WHERE
nodeport = :worker_2_port;

\c - - - :master_port
\c - - :real_master_host :master_port

-- re-add the node for next tests
SELECT 1 FROM master_activate_node(:'worker_2_host', :worker_2_port);

@@ -65,7 +65,7 @@ CREATE TABLE repartition_udt_other (
-- proceed with type creation as above; thus the OIDs will be different.
-- so that the OID is off.

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- START type creation
-- ... as well as a function to use as its comparator...

@@ -109,7 +109,7 @@ FUNCTION 1 test_udt_hash(test_udt);

-- END type creation

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port

-- START type creation
-- ... as well as a function to use as its comparator...

@@ -155,7 +155,7 @@ FUNCTION 1 test_udt_hash(test_udt);

-- Connect to master

\c - - - :master_port
\c - - :real_master_host :master_port

-- Distribute and populate the two tables.
SET citus.shard_count TO 3;

@@ -7,7 +7,7 @@ SET citus.next_shard_id TO 830000;

-- Create UDF in master and workers
\c - - - :master_port
\c - - :real_master_host :master_port
DROP FUNCTION IF EXISTS median(double precision[]);

CREATE FUNCTION median(double precision[]) RETURNS double precision

@@ -18,7 +18,7 @@ LANGUAGE sql IMMUTABLE AS $_$
OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
$_$;
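-- Only the tail of the function body survives the hunk; a self-contained
-- sketch of a median over a sorted array, consistent with the fragments
-- above (the repository's exact body may differ):
--   CREATE FUNCTION median(double precision[]) RETURNS double precision
--   LANGUAGE sql IMMUTABLE AS $_$
--       SELECT AVG(val) FROM (
--           SELECT val FROM unnest($1) val
--           ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2)
--           OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
--   $_$;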

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
DROP FUNCTION IF EXISTS median(double precision[]);

CREATE FUNCTION median(double precision[]) RETURNS double precision

@@ -29,7 +29,7 @@ LANGUAGE sql IMMUTABLE AS $_$
OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
$_$;

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
DROP FUNCTION IF EXISTS median(double precision[]);

CREATE FUNCTION median(double precision[]) RETURNS double precision

@@ -41,7 +41,7 @@ LANGUAGE sql IMMUTABLE AS $_$
$_$;

-- Run query on master
\c - - - :master_port
\c - - :real_master_host :master_port

SET citus.task_executor_type TO 'task-tracker';

@@ -1112,7 +1112,7 @@ SELECT master_create_worker_shards('failure_test', 2);
SET citus.enable_ddl_propagation TO off;
CREATE USER router_user;
GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
CREATE USER router_user;
GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user;
\c - router_user - :master_port

@@ -1137,7 +1137,7 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
\c - postgres - :worker_1_port
DROP OWNED BY router_user;
DROP USER router_user;
\c - - - :master_port
\c - - :real_master_host :master_port
DROP OWNED BY router_user;
DROP USER router_user;
DROP TABLE failure_test;
@ -171,7 +171,7 @@ $$
|
|||
LANGUAGE 'plpgsql' IMMUTABLE;
|
||||
|
||||
-- create UDF in worker node 1
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
CREATE OR REPLACE FUNCTION dummyFunction(theValue integer)
|
||||
RETURNS text AS
|
||||
$$
|
||||
|
@ -184,7 +184,7 @@ $$
|
|||
LANGUAGE 'plpgsql' IMMUTABLE;
|
||||
|
||||
-- create UDF in worker node 2
|
||||
\c - - - :worker_2_port
|
||||
\c - - :real_worker_2_host :worker_2_port
|
||||
CREATE OR REPLACE FUNCTION dummyFunction(theValue integer)
|
||||
RETURNS text AS
|
||||
$$
|
||||
|
@ -196,7 +196,7 @@ END;
|
|||
$$
|
||||
LANGUAGE 'plpgsql' IMMUTABLE;
|
||||
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
|
||||
-- UDF in public, table in a schema other than public, search_path is not set
|
||||
SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1;
|
||||
|
@ -219,7 +219,7 @@ $$
|
|||
LANGUAGE 'plpgsql' IMMUTABLE;
|
||||
|
||||
-- create UDF in worker node 1 in schema
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
SET search_path TO test_schema_support;
|
||||
CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer)
|
||||
RETURNS text AS
|
||||
|
@ -233,7 +233,7 @@ $$
|
|||
LANGUAGE 'plpgsql' IMMUTABLE;
|
||||
|
||||
-- create UDF in worker node 2 in schema
|
||||
\c - - - :worker_2_port
|
||||
\c - - :real_worker_2_host :worker_2_port
|
||||
SET search_path TO test_schema_support;
|
||||
CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer)
|
||||
RETURNS text AS
|
||||
|
@ -246,7 +246,7 @@ END;
|
|||
$$
|
||||
LANGUAGE 'plpgsql' IMMUTABLE;
|
||||
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
|
||||
-- UDF in schema, table in a schema other than public, search_path is not set
|
||||
SET search_path TO public;
|
||||
|
@ -271,7 +271,7 @@ CREATE OPERATOR test_schema_support.=== (
|
|||
);
|
||||
|
||||
-- create operator in worker node 1
|
||||
\c - - - :worker_1_port
|
||||
\c - - :real_worker_1_host :worker_1_port
|
||||
CREATE OPERATOR test_schema_support.=== (
|
||||
LEFTARG = int,
|
||||
RIGHTARG = int,
|
||||
|
@ -282,7 +282,7 @@ CREATE OPERATOR test_schema_support.=== (
|
|||
);
|
||||
|
||||
-- create operator in worker node 2
|
||||
\c - - - :worker_2_port
|
||||
\c - - :real_worker_2_host :worker_2_port
|
||||
CREATE OPERATOR test_schema_support.=== (
|
||||
LEFTARG = int,
|
||||
RIGHTARG = int,
|
||||
|
@ -292,7 +292,7 @@ CREATE OPERATOR test_schema_support.=== (
|
|||
HASHES, MERGES
|
||||
);
|
||||
|
||||
\c - - - :master_port
|
||||
\c - - :real_master_host :master_port
|
||||
|
||||
-- test with search_path is not set
|
||||
SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_schema_support.===) 1;
|
||||
|
@ -322,7 +322,7 @@ SET search_path TO public;
|
|||
SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset
|
||||
CREATE COLLATION test_schema_support.english (LOCALE = :current_locale);
|
||||

\c - - - :master_port
\c - - :real_master_host :master_port

CREATE TABLE test_schema_support.nation_hash_collation(
n_nationkey integer not null,
@ -406,18 +406,18 @@ ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT;

-- verify column is added
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
\c - - - :master_port
\c - - :real_master_host :master_port

ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS non_existent_column;
ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col;

-- verify column is dropped
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
\c - - - :master_port
\c - - :real_master_host :master_port

--test with search_path is set
SET search_path TO test_schema_support;
@ -425,9 +425,9 @@ ALTER TABLE nation_hash ADD COLUMN new_col INT;

-- verify column is added
SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
\c - - - :master_port
\c - - :real_master_host :master_port

SET search_path TO test_schema_support;
ALTER TABLE nation_hash DROP COLUMN IF EXISTS non_existent_column;
@ -435,9 +435,9 @@ ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col;

-- verify column is dropped
SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass;
\c - - - :master_port
\c - - :real_master_host :master_port


-- test CREATE/DROP INDEX with schemas
@ -449,19 +449,19 @@ CREATE INDEX index1 ON test_schema_support.nation_hash(n_name);
--verify INDEX is created
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'test_schema_support.index1'::regclass;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'test_schema_support.index1_1190003'::regclass;
\c - - - :master_port
\c - - :real_master_host :master_port

-- DROP index
DROP INDEX test_schema_support.index1;

--verify INDEX is dropped
\d test_schema_support.index1
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\d test_schema_support.index1_1190003
\c - - - :master_port
\c - - :real_master_host :master_port

--test with search_path is set
SET search_path TO test_schema_support;
@ -472,10 +472,10 @@ CREATE INDEX index1 ON nation_hash(n_name);
--verify INDEX is created
SELECT "Column", "Type", "Definition" FROM public.index_attrs WHERE
relid = 'test_schema_support.index1'::regclass;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'test_schema_support.index1_1190003'::regclass;
\c - - - :master_port
\c - - :real_master_host :master_port

-- DROP index
SET search_path TO test_schema_support;
@ -483,9 +483,9 @@ DROP INDEX index1;

--verify INDEX is dropped
\d test_schema_support.index1
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\d test_schema_support.index1_1190003
\c - - - :master_port
\c - - :real_master_host :master_port

-- test master_copy_shard_placement with schemas
@ -515,10 +515,10 @@ SET search_path TO public;
SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_append') ;

-- verify shard is dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\d test_schema_support.nation_append_119*

\c - - - :master_port
\c - - :real_master_host :master_port

-- test with search_path is set
SET search_path TO test_schema_support;
@ -535,10 +535,10 @@ SET search_path TO test_schema_support;
SELECT master_apply_delete_command('DELETE FROM nation_append') ;

-- verify shard is dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\d test_schema_support.nation_append_119*

\c - - - :master_port
\c - - :real_master_host :master_port

-- check joins of tables which are in schemas other than public
-- we create new tables with replication factor of 1
@ -731,26 +731,26 @@ CREATE SCHEMA new_schema;
SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Shards' Schema"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%' AND
table_schema IN ('old_schema', 'new_schema', 'public')
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port

ALTER TABLE old_schema.table_set_schema SET SCHEMA new_schema;

SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Shards' Schema"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%' AND
table_schema IN ('old_schema', 'new_schema', 'public')
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port
SELECT * FROM new_schema.table_set_schema;

DROP SCHEMA old_schema CASCADE;
@ -765,26 +765,26 @@ CREATE SCHEMA new_schema;
SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid='new_schema'::regnamespace::oid;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Shards' Schema"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%' AND
table_schema IN ('new_schema', 'public')
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port

ALTER TABLE table_set_schema SET SCHEMA new_schema;

SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid='new_schema'::regnamespace::oid;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Shards' Schema"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%' AND
table_schema IN ('new_schema', 'public')
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port
SELECT * FROM new_schema.table_set_schema;

DROP SCHEMA new_schema CASCADE;
@ -801,13 +801,13 @@ CREATE SCHEMA new_schema;
SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Shards' Schema", COUNT(*) AS "Counts"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%' AND
table_schema IN ('old_schema', 'new_schema', 'public')
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port

SET search_path TO old_schema;
ALTER TABLE table_set_schema SET SCHEMA new_schema;
@ -815,13 +815,13 @@ ALTER TABLE table_set_schema SET SCHEMA new_schema;
SELECT objid::oid::regnamespace as "Distributed Schemas"
FROM citus.pg_dist_object
WHERE objid::oid::regnamespace IN ('old_schema', 'new_schema');
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Shards' Schema", COUNT(*) AS "Counts"
FROM information_schema.tables
WHERE table_name LIKE 'table\_set\_schema\_%' AND
table_schema IN ('old_schema', 'new_schema', 'public')
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port
SELECT * FROM new_schema.table_set_schema;

SET search_path to public;
@ -856,12 +856,12 @@ SELECT create_distributed_table('"cItuS.T E E N''sSchema"."be$t''''t*ble"', 'id'

ALTER TABLE "cItuS.T E E N'sSchema"."be$t''t*ble" SET SCHEMA "citus-teen's scnd schm.";

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT table_schema AS "Shards' Schema"
FROM information_schema.tables
WHERE table_name LIKE 'be$t''''t*ble%'
GROUP BY table_schema;
\c - - - :master_port
\c - - :real_master_host :master_port

SELECT * FROM "citus-teen's scnd schm."."be$t''t*ble";

@ -877,10 +877,10 @@ CREATE TABLE schema_with_user.test_table(column1 int);
SELECT create_reference_table('schema_with_user.test_table');

-- verify that owner of the created schema is test-user
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dn schema_with_user

\c - - - :master_port
\c - - :real_master_host :master_port

-- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock
DROP OWNED BY "test-user" CASCADE;

@ -83,9 +83,9 @@ ALTER SEQUENCE standalone_sequence OWNED BY testserialtable.group_id;
ALTER SEQUENCE testserialtable_id_seq OWNED BY testserialtable.id;

-- drop distributed table
\c - - - :master_port
\c - - :real_master_host :master_port
DROP TABLE testserialtable;

-- verify owned sequence is dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\ds

@ -20,7 +20,7 @@ SELECT pg_reload_conf();
SELECT recover_prepared_transactions();

-- Create some "fake" prepared transactions to recover
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

BEGIN;
CREATE TABLE should_abort (value int);
@ -34,7 +34,7 @@ BEGIN;
CREATE TABLE should_be_sorted_into_middle (value int);
PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle';

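-- [editor's note] PREPARE TRANSACTION is the first phase of PostgreSQL's
-- two-phase commit: it detaches the open transaction from the session and
-- persists it under the given GID until a COMMIT PREPARED or ROLLBACK
-- PREPARED resolves it; that is exactly the orphaned state which
-- recover_prepared_transactions() is exercised against here. A hand-run
-- sketch of the lifecycle (table and GID names are invented, and the node
-- must have max_prepared_transactions > 0):
BEGIN;
CREATE TABLE manual_2pc_demo (value int);
PREPARE TRANSACTION 'manual_2pc_demo_gid';
SELECT gid FROM pg_prepared_xacts;
ROLLBACK PREPARED 'manual_2pc_demo_gid';
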
\c - - - :master_port
\c - - :real_master_host :master_port

BEGIN;
CREATE TABLE should_abort (value int);
@ -62,11 +62,11 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort';
SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit';

-- Confirm that transactions were correctly rolled forward
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort';
SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit';

\c - - - :master_port
\c - - :real_master_host :master_port
SET citus.force_max_query_parallelization TO ON;
SET citus.shard_replication_factor TO 2;
SET citus.shard_count TO 2;

@ -31,9 +31,9 @@ ORDER BY
\dt transactional_drop_shards

-- verify shards are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*
\c - - - :master_port
\c - - :real_master_host :master_port


-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT
@ -56,9 +56,9 @@ ORDER BY
\dt transactional_drop_shards

-- verify shards are dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*
\c - - - :master_port
\c - - :real_master_host :master_port


-- test master_delete_protocol in transaction, then ROLLBACK
@ -82,9 +82,9 @@ ORDER BY
shardid, nodename, nodeport;

-- verify shards are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*
\c - - - :master_port
\c - - :real_master_host :master_port


-- test master_delete_protocol in transaction, then COMMIT
@ -104,9 +104,9 @@ ORDER BY
shardid, nodename, nodeport;

-- verify shards are dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*
\c - - - :master_port
\c - - :real_master_host :master_port


-- test DROP table in a transaction after insertion
@ -132,9 +132,9 @@ ORDER BY
\dt transactional_drop_shards

-- verify shards are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*
\c - - - :master_port
\c - - :real_master_host :master_port


-- test master_apply_delete_command in a transaction after insertion
@ -155,7 +155,7 @@ ORDER BY
shardid, nodename, nodeport;

-- verify shards are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*


@ -168,7 +168,7 @@ $fdt$ LANGUAGE plpgsql;

CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table();

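-- [editor's note] The hunk header above shows only the tail of the trigger
-- function ($fdt$ LANGUAGE plpgsql;); its body is elided by the diff. For
-- orientation, a failing sql_drop event trigger of this general shape can be
-- written as below; the RAISE message is invented, not the test's actual body:
CREATE OR REPLACE FUNCTION fail_drop_table() RETURNS event_trigger AS $fdt$
BEGIN
    RAISE 'illustrative failure while dropping a table';
END;
$fdt$ LANGUAGE plpgsql;
CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table();
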
\c - - - :master_port
\c - - :real_master_host :master_port

\set VERBOSITY terse
DROP TABLE transactional_drop_shards;
@ -189,9 +189,9 @@ ORDER BY
\dt transactional_drop_shards

-- verify shards are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*
\c - - - :master_port
\c - - :real_master_host :master_port


-- test DROP reference table with failing worker
@ -217,9 +217,9 @@ ORDER BY
\dt transactional_drop_reference

-- verify shards are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_reference*
\c - - - :master_port
\c - - :real_master_host :master_port


-- test master_apply_delete_command table with failing worker
@ -239,10 +239,10 @@ ORDER BY
shardid, nodename, nodeport;

-- verify shards are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_shards_*
DROP EVENT TRIGGER fail_drop_table;
\c - - - :master_port
\c - - :real_master_host :master_port


-- test with SERIAL column + with more shards
@ -270,10 +270,10 @@ ORDER BY
\dt transactional_drop_serial

-- verify shards and sequence are not dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_serial_*
\ds transactional_drop_serial_column2_seq
\c - - - :master_port
\c - - :real_master_host :master_port


-- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT
@ -296,10 +296,10 @@ ORDER BY
\dt transactional_drop_serial

-- verify shards and sequence are dropped
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
\dt transactional_drop_serial_*
\ds transactional_drop_serial_column2_seq
\c - - - :master_port
\c - - :real_master_host :master_port


-- test with MX, DROP TABLE, then ROLLBACK
@ -315,7 +315,7 @@ UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='transactional_drop
SELECT start_metadata_sync_to_node(:'worker_1_host', :worker_1_port);

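-- [editor's note] start_metadata_sync_to_node is what turns a worker into an
-- MX (metadata) node: the coordinator ships its pg_dist_* catalog entries to
-- the worker and sets hasmetadata in pg_dist_node. A quick way to observe the
-- flag, reusing the variables these tests already define:
SELECT hasmetadata FROM pg_dist_node WHERE nodeport = :worker_1_port;
-- stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port) clears it again
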
-- see metadata is propagated to the worker
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid;
SELECT
shardid, shardstate, nodename, nodeport
@ -326,13 +326,13 @@ WHERE
ORDER BY
shardid, nodename, nodeport;

\c - - - :master_port
\c - - :real_master_host :master_port
BEGIN;
DROP TABLE transactional_drop_mx;
ROLLBACK;

-- verify metadata is not deleted
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid;
SELECT
shardid, shardstate, nodename, nodeport
@ -344,13 +344,13 @@ ORDER BY
shardid, nodename, nodeport;

-- test with MX, DROP TABLE, then COMMIT
\c - - - :master_port
\c - - :real_master_host :master_port
BEGIN;
DROP TABLE transactional_drop_mx;
COMMIT;

-- verify metadata is deleted
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid;
SELECT
shardid, shardstate, nodename, nodeport
@ -361,7 +361,7 @@ WHERE
ORDER BY
shardid, nodename, nodeport;

\c - - - :master_port
\c - - :real_master_host :master_port

-- try using the coordinator as a worker and then dropping the table
SELECT 1 FROM master_add_node('localhost', :master_port);

@ -51,7 +51,7 @@ INSERT INTO mx_ref_table VALUES (-34, 'augue');
SELECT * FROM mx_table ORDER BY col_1;

-- Try commands from metadata worker
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

CREATE TABLE mx_table_worker(col_1 text);

@ -79,10 +79,10 @@ SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass;
INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp;
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass;

\c - - - :master_port
\c - - :real_master_host :master_port
DROP TABLE mx_ref_table;
CREATE UNIQUE INDEX mx_test_uniq_index ON mx_table(col_1);
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- changing isdatanode
SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false);
@ -110,18 +110,18 @@ SELECT 1 FROM master_add_inactive_node('localhost', 5432);
SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;

-- master_remove_node
\c - - - :master_port
\c - - :real_master_host :master_port
DROP INDEX mx_test_uniq_index;
SELECT 1 FROM master_add_inactive_node('localhost', 5432);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT master_remove_node('localhost', 5432);
SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432;

\c - - - :master_port
\c - - :real_master_host :master_port
SELECT master_remove_node('localhost', 5432);

\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- mark_tables_colocated
UPDATE pg_dist_partition SET colocationid = 0 WHERE logicalrelid='mx_table_2'::regclass;
@ -142,20 +142,20 @@ SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;

-- stop_metadata_sync_to_node
\c - - - :master_port
\c - - :real_master_host :master_port
SELECT start_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);

\c - - - :master_port
\c - - :real_master_host :master_port
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
SELECT stop_metadata_sync_to_node(:'worker_2_host', :worker_2_port);
SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition;
DELETE FROM pg_dist_node;
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port

-- DROP TABLE
-- terse verbosity because pg10 has slightly different output
@ -209,14 +209,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_tabl
ROLLBACK;

-- Cleanup
\c - - - :master_port
\c - - :real_master_host :master_port
DROP TABLE mx_table;
DROP TABLE mx_table_2;
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
DELETE FROM pg_dist_node;
SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition;
\c - - - :master_port
\c - - :real_master_host :master_port
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;

RESET citus.shard_replication_factor;

@ -533,9 +533,9 @@ GROUP BY shardid
ORDER BY shardid;

-- verify that shard is replicated to other worker
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
\dt upgrade_reference_table_transaction_commit_*
\c - - - :master_port
\c - - :real_master_host :master_port

DROP TABLE upgrade_reference_table_transaction_commit;

@ -697,7 +697,7 @@ GROUP BY shardid
ORDER BY shardid;

-- situation on metadata worker
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT
partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel
FROM
@ -722,7 +722,7 @@ WHERE shardid IN
GROUP BY shardid
ORDER BY shardid;

\c - - - :master_port
\c - - :real_master_host :master_port
DROP TABLE upgrade_reference_table_mx;
SELECT stop_metadata_sync_to_node(:'worker_1_host', :worker_1_port);
RESET client_min_messages;

@ -133,7 +133,7 @@ SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
-- following approach adapted from PostgreSQL's stats.sql file

-- save relevant stat counter values in refreshable view
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
CREATE MATERIALIZED VIEW prevcounts AS
SELECT analyze_count, vacuum_count FROM pg_stat_user_tables
WHERE relname='dustbunnies_990002';
@ -174,7 +174,7 @@ begin
end
$$ language plpgsql;

\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
CREATE MATERIALIZED VIEW prevcounts AS
SELECT analyze_count, vacuum_count FROM pg_stat_user_tables
WHERE relname='dustbunnies_990001';
@ -215,12 +215,12 @@ end
$$ language plpgsql;

-- run VACUUM and ANALYZE against the table on the master
\c - - - :master_port
\c - - :real_master_host :master_port
VACUUM dustbunnies;
ANALYZE dustbunnies;

-- verify that the VACUUM and ANALYZE ran
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT wait_for_stats();
REFRESH MATERIALIZED VIEW prevcounts;
SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass);
@ -231,12 +231,12 @@ SELECT relfilenode AS oldnode FROM pg_class WHERE oid='dustbunnies_990002'::regc
\gset

-- send a VACUUM FULL and a VACUUM ANALYZE
\c - - - :master_port
\c - - :real_master_host :master_port
VACUUM (FULL) dustbunnies;
VACUUM ANALYZE dustbunnies;

-- verify that relfilenode changed
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT relfilenode != :oldnode AS table_rewritten FROM pg_class
WHERE oid='dustbunnies_990002'::regclass;
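-- [editor's note] The check above works because VACUUM (FULL) rewrites the
-- table into a new physical file and therefore assigns a new relfilenode,
-- while plain VACUUM reclaims space in place. The same pattern, sketched
-- against an arbitrary (hypothetical) table t:
SELECT relfilenode AS oldnode FROM pg_class WHERE oid = 't'::regclass \gset
VACUUM (FULL) t;
SELECT relfilenode <> :oldnode AS table_rewritten
FROM pg_class WHERE oid = 't'::regclass;
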

@ -251,12 +251,12 @@ SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::r
\gset

-- send a VACUUM FREEZE after adding a new row
\c - - - :master_port
\c - - :real_master_host :master_port
INSERT INTO dustbunnies VALUES (5, 'peter');
VACUUM (FREEZE) dustbunnies;

-- verify that relfrozenxid increased
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid='dustbunnies_990002'::regclass;

@ -265,16 +265,16 @@ SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;

-- add NULL values, then perform column-specific ANALYZE
\c - - - :master_port
\c - - :real_master_host :master_port
INSERT INTO dustbunnies VALUES (6, NULL, NULL);
ANALYZE dustbunnies (name);

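-- [editor's note] ANALYZE with a column list recomputes pg_statistic entries
-- only for the listed columns, which is why the test expects name's null_frac
-- to move while age's stays stale. Condensed, with a hypothetical table
-- bunnies(id, name, age):
INSERT INTO bunnies VALUES (6, NULL, NULL);
ANALYZE bunnies (name);
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'bunnies' ORDER BY attname;
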
-- verify that name's NULL ratio is updated but age's is not
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;

\c - - - :master_port
\c - - :real_master_host :master_port
-- verify warning for unqualified VACUUM
VACUUM;

@ -60,7 +60,7 @@ WHERE
AND second_distributed_table.dept IN (2);

-- run some queries from worker nodes
\c - - - :worker_1_port
\c - - :real_worker_1_host :worker_1_port
SET search_path TO recursive_dml_queries_mx, public;

-- the subquery foo is recursively planned
@ -99,7 +99,7 @@ WHERE
AND second_distributed_table.dept IN (3);

-- use the second worker
\c - - - :worker_2_port
\c - - :real_worker_2_host :worker_2_port
SET search_path TO recursive_dml_queries_mx, public;

CREATE TABLE recursive_dml_queries_mx.local_table (id text, name text);
@ -150,7 +150,7 @@ RETURNING

DROP TABLE local_table;

\c - - - :master_port
\c - - :real_master_host :master_port
SET search_path TO recursive_dml_queries_mx, public;

RESET client_min_messages;