SET citus.next_shard_id TO 990000;
-- ===================================================================
-- test utility statement functionality
-- ===================================================================
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
CREATE TABLE sharded_table ( name text, id bigint );
SELECT create_distributed_table('sharded_table', 'id', 'hash');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- COPY out is supported with distributed tables
COPY sharded_table TO STDOUT;
COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT;
0
BEGIN;
SET TRANSACTION READ ONLY;
COPY sharded_table TO STDOUT;
COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT;
0
COMMIT;
-- ANALYZE is supported in a transaction block
BEGIN;
ANALYZE sharded_table;
ANALYZE sharded_table;
END;
-- cursors may not involve distributed tables
DECLARE all_sharded_rows CURSOR FOR SELECT * FROM sharded_table;
ERROR: DECLARE CURSOR can only be used in transaction blocks
-- verify PREPARE functionality
PREPARE sharded_insert AS INSERT INTO sharded_table VALUES ('adam', 1);
PREPARE sharded_update AS UPDATE sharded_table SET name = 'bob' WHERE id = 1;
PREPARE sharded_delete AS DELETE FROM sharded_table WHERE id = 1;
PREPARE sharded_query AS SELECT name FROM sharded_table WHERE id = 1;
EXECUTE sharded_query;
name
---------------------------------------------------------------------
(0 rows)

EXECUTE sharded_insert;
EXECUTE sharded_query;
name
---------------------------------------------------------------------
adam
(1 row)

EXECUTE sharded_update;
EXECUTE sharded_query;
name
---------------------------------------------------------------------
bob
(1 row)

EXECUTE sharded_delete;
EXECUTE sharded_query;
name
---------------------------------------------------------------------
(0 rows)

-- drop all shards
SELECT citus_drop_all_shards('sharded_table','','');
citus_drop_all_shards
---------------------------------------------------------------------
2
(1 row)

SET citus.shard_count TO 4;
SET citus.next_shard_id TO 999001;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1400000;
CREATE TABLE lockable_table ( name text, id bigint );
SELECT create_distributed_table('lockable_table', 'id', 'hash', colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SET citus.shard_count TO 2;
SET citus.next_shard_id TO 990002;
-- lock shard metadata: take some share locks and exclusive locks
BEGIN;
SELECT lock_shard_metadata(5, ARRAY[999001, 999002, 999002]);
lock_shard_metadata
---------------------------------------------------------------------

(1 row)

SELECT lock_shard_metadata(7, ARRAY[999001, 999003, 999004]);
lock_shard_metadata
---------------------------------------------------------------------

(1 row)

SELECT
    CASE
        WHEN l.objsubid = 5 THEN 'shard'
        WHEN l.objsubid = 4 THEN 'shard_metadata'
        ELSE 'colocated_shards_metadata'
    END AS locktype,
    objid,
    classid,
    mode,
    granted
FROM pg_locks l
WHERE l.locktype = 'advisory'
ORDER BY locktype, objid, classid, mode;
locktype | objid | classid | mode | granted
---------------------------------------------------------------------
colocated_shards_metadata | 1400000 | 0 | ExclusiveLock | t
colocated_shards_metadata | 1400000 | 0 | ShareLock | t
colocated_shards_metadata | 1400000 | 1 | ShareLock | t
colocated_shards_metadata | 1400000 | 2 | ExclusiveLock | t
colocated_shards_metadata | 1400000 | 3 | ExclusiveLock | t
(5 rows)

END;
-- lock shard metadata: unsupported lock type
SELECT lock_shard_metadata(0, ARRAY[990001, 999002]);
ERROR: unsupported lockmode 0
-- lock shard metadata: invalid shard ID
SELECT lock_shard_metadata(5, ARRAY[0]);
ERROR: could not find valid entry for shard xxxxx
-- lock shard metadata: lock nothing
SELECT lock_shard_metadata(5, ARRAY[]::bigint[]);
ERROR: no locks specified
-- lock shard resources: take some share locks and exclusive locks
BEGIN;
SELECT lock_shard_resources(5, ARRAY[999001, 999002, 999002]);
lock_shard_resources
---------------------------------------------------------------------

(1 row)

SELECT lock_shard_resources(7, ARRAY[999001, 999003, 999004]);
lock_shard_resources
---------------------------------------------------------------------

(1 row)

SELECT locktype, objid, mode, granted
FROM pg_locks
WHERE objid IN (999001, 999002, 999003, 999004)
ORDER BY objid, mode;
locktype | objid | mode | granted
---------------------------------------------------------------------
advisory | 999001 | ExclusiveLock | t
advisory | 999001 | ShareLock | t
advisory | 999002 | ShareLock | t
advisory | 999003 | ExclusiveLock | t
advisory | 999004 | ExclusiveLock | t
(5 rows)

END;
-- lock shard resources: unsupported lock type
SELECT lock_shard_resources(0, ARRAY[990001, 999002]);
ERROR: unsupported lockmode 0
-- lock shard resources: invalid shard ID
SELECT lock_shard_resources(5, ARRAY[-1]);
lock_shard_resources
---------------------------------------------------------------------

(1 row)

-- lock shard resources: lock nothing
SELECT lock_shard_resources(5, ARRAY[]::bigint[]);
ERROR: no locks specified
-- drop table
DROP TABLE sharded_table;
DROP TABLE lockable_table;
-- VACUUM tests
-- create a table with a single shard (for convenience)
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
CREATE TABLE dustbunnies (id integer, name text, age integer);
SELECT create_distributed_table('dustbunnies', 'id', 'hash');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- add some data to the distributed table
\copy dustbunnies (id, name) from stdin with csv
CREATE TABLE second_dustbunnies(id integer, name text, age integer);
SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
master_create_worker_shards
---------------------------------------------------------------------

(1 row)

-- run VACUUM and ANALYZE against the table on the master
\c - - :master_host :master_port
VACUUM dustbunnies;
ANALYZE dustbunnies;
-- send a VACUUM FULL and a VACUUM ANALYZE
VACUUM (FULL) dustbunnies;
VACUUM ANALYZE dustbunnies;
\c - - :public_worker_1_host :worker_1_port
-- disable auto-VACUUM for next test
ALTER TABLE dustbunnies_990002 SET (autovacuum_enabled = false);
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::regclass
\gset
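-- A brief illustration of the \gset pattern used above (commented out, not part of
-- the captured test output): \gset stores each column of the preceding query into a
-- psql variable of the same name, so the captured value can be referenced later as
-- :frozenxid. A minimal standalone sketch:
-- SELECT 42 AS answer \gset
-- \echo :answer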
-- send a VACUUM FREEZE after adding a new row
\c - - :master_host :master_port
INSERT INTO dustbunnies VALUES (5, 'peter');
VACUUM (FREEZE) dustbunnies;
-- verify that relfrozenxid increased
\c - - :public_worker_1_host :worker_1_port
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid='dustbunnies_990002'::regclass;
frozen_performed
---------------------------------------------------------------------
t
(1 row)

-- check there are no nulls in either column
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
attname | null_frac
---------------------------------------------------------------------
age | 1
id | 0
name | 0
(3 rows)

-- add NULL values, then perform column-specific ANALYZE
\c - - :master_host :master_port
INSERT INTO dustbunnies VALUES (6, NULL, NULL);
ANALYZE dustbunnies (name);
-- verify that name's NULL ratio is updated but age's is not
\c - - :public_worker_1_host :worker_1_port
SELECT attname, null_frac FROM pg_stats
WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
attname | null_frac
---------------------------------------------------------------------
age | 1
id | 0
name | 0.166667
(3 rows)

\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
-- check for multiple table vacuum
VACUUM dustbunnies, second_dustbunnies;
NOTICE: issuing VACUUM public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.dustbunnies_990002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.second_dustbunnies_990003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.second_dustbunnies_990003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- and do not propagate when using targeted VACUUM without DDL propagation
SET citus.enable_ddl_propagation to false;
VACUUM dustbunnies;
ANALYZE dustbunnies;
SET citus.enable_ddl_propagation to DEFAULT;
-- test worker_hash
SELECT worker_hash(123);
worker_hash
---------------------------------------------------------------------
-205084363
(1 row)

SELECT worker_hash('1997-08-08'::date);
worker_hash
---------------------------------------------------------------------
-499701663
(1 row)

-- test a custom type (this test should run after multi_data_types)
SELECT worker_hash('(1, 2)');
ERROR: cannot find a hash function for the input type
HINT: Cast input to a data type with a hash function.
SELECT worker_hash('(1, 2)'::test_composite_type);
worker_hash
---------------------------------------------------------------------
-1895345704
(1 row)

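-- Illustrative sketch (commented out, not part of the captured test output):
-- worker_hash computes the same hash value Citus uses to route rows, so the shard
-- covering a given distribution column value can be looked up by comparing that
-- hash against the ranges in pg_dist_shard. Assumes the hash-distributed
-- dustbunnies table created above is still present:
-- SELECT shardid
-- FROM pg_dist_shard
-- WHERE logicalrelid = 'dustbunnies'::regclass
--   AND worker_hash(5) BETWEEN shardminvalue::int AND shardmaxvalue::int;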
SELECT citus_truncate_trigger();
ERROR: must be called as trigger
-- make sure worker_create_or_alter_role does not crash with NULL input
SELECT worker_create_or_alter_role(NULL, NULL, NULL);
ERROR: role name cannot be NULL
SELECT worker_create_or_alter_role(NULL, 'create role dontcrash', NULL);
ERROR: role name cannot be NULL
-- confirm that citus_create_restore_point works
SELECT 1 FROM citus_create_restore_point('regression-test');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT pg_catalog.pg_create_restore_point($1::text)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT pg_catalog.pg_create_restore_point($1::text)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
?column?
---------------------------------------------------------------------
1
(1 row)

SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 970000;
SET citus.log_remote_commands TO OFF;
CREATE TABLE local_vacuum_table(id int primary key, b text);
CREATE TABLE reference_vacuum_table(id int);
SELECT create_reference_table('reference_vacuum_table');
create_reference_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE distributed_vacuum_table(id int);
SELECT create_distributed_table('distributed_vacuum_table', 'id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SET citus.log_remote_commands TO ON;
-- should propagate to all workers because no table is specified
VACUUM;
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because no distributed table is specified
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
VACUUM local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
21 MB
(1 row)

-- vacuum full deallocates pages of dead tuples, whereas a normal vacuum only marks dead tuples in the visibility map
VACUUM FULL local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
16 kB
(1 row)

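-- Illustrative sketch (commented out, not part of the captured test output):
-- the dead-tuple bookkeeping behind the size difference above can be observed
-- through the cumulative statistics views, assuming the stats have been flushed:
-- SELECT n_live_tup, n_dead_tup, last_vacuum
-- FROM pg_stat_user_tables
-- WHERE relname = 'local_vacuum_table';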
-- should propagate to all workers because table is reference table
VACUUM reference_vacuum_table;
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table
VACUUM distributed_vacuum_table;
NOTICE: issuing VACUUM public.distributed_vacuum_table_970001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only distributed_vacuum_table and reference_vacuum_table should propagate
VACUUM distributed_vacuum_table, local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM public.distributed_vacuum_table_970001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_vacuum_table should propagate
VACUUM local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (disable_page_skipping) aggressively processes pages of the relation; it does not respect the visibility map
VACUUM (DISABLE_PAGE_SKIPPING true) local_vacuum_table;
VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table;
-- vacuum (index_cleanup on, parallel 1) should execute the index vacuuming and index cleanup phases in parallel
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
56 MB
(1 row)

insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
21 MB
(1 row)

----------------- PROCESS_TOAST is only available for pg14
-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
--select reltoastrelid from pg_class where relname='local_vacuum_table'
--\gset
--SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
--\gset
--VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
--SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
--WHERE oid=:reltoastrelid::regclass;
--SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
--\gset
--VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
--SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
--WHERE oid=:reltoastrelid::regclass;
---------------------------------------------------------------------
-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
vacuum (TRUNCATE false) local_vacuum_table;
SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
vacuum (TRUNCATE true) local_vacuum_table;
SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset
SELECT :size1 > :size2 as truncate_less_size;
truncate_less_size
---------------------------------------------------------------------
t
(1 row)

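-- Illustrative sketch (commented out, not part of the captured test output):
-- the effect of TRUNCATE on trailing empty pages can also be read as a page count
-- rather than a byte size, assuming relpages is fresh after the VACUUM runs:
-- SELECT relpages FROM pg_class WHERE relname = 'local_vacuum_table';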
-- vacuum (analyze) should analyze the table to generate statistics after vacuuming
select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table';
analyze_count
---------------------------------------------------------------------
0
0
(2 rows)

vacuum (analyze) local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM (ANALYZE) public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE) public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- give enough time for stats to be updated (they are updated every 500ms by default)
select pg_sleep(1);
pg_sleep
---------------------------------------------------------------------

(1 row)

select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table';
analyze_count
---------------------------------------------------------------------
1
1
(2 rows)

-- should not propagate because ddl propagation is disabled
SET citus.enable_ddl_propagation TO OFF;
VACUUM distributed_vacuum_table;
SET citus.enable_ddl_propagation TO ON;
SET citus.log_remote_commands TO OFF;
-- ANALYZE tests
CREATE TABLE local_analyze_table(id int);
CREATE TABLE reference_analyze_table(id int);
SELECT create_reference_table('reference_analyze_table');
create_reference_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE distributed_analyze_table(id int);
SELECT create_distributed_table('distributed_analyze_table', 'id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE loc (a INT, b INT);
CREATE TABLE dist (a INT);
SELECT create_distributed_table ('dist', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SET citus.log_remote_commands TO ON;
-- should propagate to all workers because no table is specified
ANALYZE;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because no distributed table is specified
ANALYZE local_analyze_table;
-- should propagate to all workers because table is reference table
ANALYZE reference_analyze_table;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table
ANALYZE distributed_analyze_table;
NOTICE: issuing ANALYZE public.distributed_analyze_table_970003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only distributed_analyze_table and reference_analyze_table should propagate
ANALYZE distributed_analyze_table, local_analyze_table, reference_analyze_table;
NOTICE: issuing ANALYZE public.distributed_analyze_table_970003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_analyze_table should propagate
ANALYZE local_analyze_table, reference_analyze_table;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because ddl propagation is disabled
SET citus.enable_ddl_propagation TO OFF;
ANALYZE distributed_analyze_table;
SET citus.enable_ddl_propagation TO ON;
-- analyze only specified columns for corresponding tables
ANALYZE loc(b), dist(a);
NOTICE: issuing ANALYZE public.dist_970004 (a)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx