SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int > 13 AS server_version_above_thirteen
\gset
\if :server_version_above_thirteen
\else
\q
\endif
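-- The \gset/\if guard above reads the server version into a psql variable and
-- bails out with \q unless the major version is above 13, so the PG14-specific
-- tests below only run against PostgreSQL 14 or newer. An equivalent sketch for
-- a hypothetical PG15-only test file would be:
--   SELECT substring(:'server_version', '\d+')::int > 14 AS server_version_above_fourteen
--   \gset
--   \if :server_version_above_fourteen
--   \else
--   \q
--   \endif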
create schema pg14;
set search_path to pg14;
SET citus.next_shard_id TO 980000;
SET citus.shard_count TO 2;
-- test the new vacuum option, process_toast
CREATE TABLE t1 (a int);
SELECT create_distributed_table('t1','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

SET citus.log_remote_commands TO ON;
VACUUM (FULL) t1;
NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST true) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST false) t1;
ERROR: PROCESS_TOAST required with VACUUM FULL
VACUUM (PROCESS_TOAST false) t1;
NOTICE: issuing VACUUM pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands TO OFF;
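-- Background on the option exercised above: PROCESS_TOAST (new in PostgreSQL 14)
-- controls whether VACUUM also processes the relation's TOAST table. FULL always
-- has to rewrite the TOAST table, which is why combining FULL with
-- PROCESS_TOAST false errors out while the other forms succeed. A minimal
-- sketch against a hypothetical local table:
--   VACUUM (PROCESS_TOAST false) some_table;      -- skip the TOAST table
--   VACUUM (FULL, PROCESS_TOAST true) some_table; -- allowed, same as plain FULL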
create table dist(a int, b int);
select create_distributed_table('dist','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

create index idx on dist(a);
set citus.log_remote_commands to on;
-- make sure that we send the tablespace option
SET citus.multi_shard_commit_protocol TO '1pc';
SET citus.multi_shard_modify_mode TO 'sequential';
reindex(TABLESPACE test_tablespace) index idx;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
reindex(TABLESPACE test_tablespace, verbose) index idx;
INFO: index "idx" was reindexed
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
reindex(TABLESPACE test_tablespace, verbose false) index idx ;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
reindex(verbose, TABLESPACE test_tablespace) index idx ;
INFO: index "idx" was reindexed
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REINDEX (VERBOSE, TABLESPACE test_tablespace) INDEX pg14.xxxxx
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should error saying the tablespace doesn't exist
reindex(TABLESPACE test_tablespace1) index idx;
ERROR: tablespace "test_tablespace1" does not exist
reset citus.log_remote_commands;
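-- Background on the REINDEX form above: PostgreSQL 14 added a parenthesized
-- option list to REINDEX, including TABLESPACE, which rebuilds the index in the
-- given tablespace; the logged remote commands show Citus forwarding the same
-- option list for each shard index. Illustrative sketch (hypothetical names):
--   REINDEX (VERBOSE, TABLESPACE some_tablespace) INDEX some_index;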
-- CREATE STATISTICS only allows simple column references
CREATE TABLE tbl1(a timestamp, b int);
SELECT create_distributed_table('tbl1','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- the last one should error out
CREATE STATISTICS s1 (dependencies) ON a, b FROM tbl1;
CREATE STATISTICS s2 (mcv) ON a, b FROM tbl1;
CREATE STATISTICS s3 (ndistinct) ON date_trunc('month', a), date_trunc('day', a) FROM tbl1;
ERROR: only simple column references are allowed in CREATE STATISTICS
set citus.log_remote_commands to off;
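-- Background on the error above: PostgreSQL 14 itself accepts expressions in
-- CREATE STATISTICS, but, as the error shows, only simple column references are
-- accepted here for the distributed table, so the expression-based s3 is
-- rejected while the plain-column variants succeed, e.g.:
--   CREATE STATISTICS s_ok (ndistinct) ON a, b FROM tbl1;  -- illustrative, plain columns only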
-- error out in case of ALTER TABLE .. DETACH PARTITION .. CONCURRENTLY/FINALIZE
-- only if it's a distributed partitioned table
CREATE TABLE par (a INT UNIQUE) PARTITION BY RANGE(a);
CREATE TABLE par_1 PARTITION OF par FOR VALUES FROM (1) TO (4);
CREATE TABLE par_2 PARTITION OF par FOR VALUES FROM (5) TO (8);
-- works as it's not distributed
ALTER TABLE par DETACH PARTITION par_1 CONCURRENTLY;
-- errors out
SELECT create_distributed_table('par','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

ALTER TABLE par DETACH PARTITION par_2 CONCURRENTLY;
ERROR: ALTER TABLE .. DETACH PARTITION .. CONCURRENTLY commands are currently unsupported.
ALTER TABLE par DETACH PARTITION par_2 FINALIZE;
ERROR: ALTER TABLE .. DETACH PARTITION .. FINALIZE commands are currently unsupported.
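-- Background: DETACH PARTITION .. CONCURRENTLY and .. FINALIZE are new
-- PostgreSQL 14 forms; as the errors above show, they are rejected once the
-- parent is distributed, while the plain, non-concurrent form remains available,
-- e.g. (illustrative):
--   ALTER TABLE par DETACH PARTITION par_2;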
-- test column compression propagation in distribution
SET citus.shard_replication_factor TO 1;
CREATE TABLE col_compression (a TEXT COMPRESSION pglz, b TEXT);
SELECT create_distributed_table('col_compression', 'a', shard_count:=4);
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT attname || ' ' || attcompression AS column_compression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'col\_compression%' AND attnum > 0 ORDER BY 1;
column_compression
---------------------------------------------------------------------
a p
b
(2 rows)

SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
column_compression
---------------------------------------------------------------------
{"a p","a p","b ","b "}
{"a p","a p","b ","b "}
(2 rows)
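-- Background on the catalog check above: pg_attribute.attcompression holds one
-- character per column, 'p' for pglz, 'l' for lz4, and an empty value when the
-- column simply follows default_toast_compression; that is why column a shows
-- up as "a p" while b has no trailing letter. An equivalent local check
-- (illustrative) would be:
--   SELECT attname, attcompression FROM pg_attribute
--   WHERE attrelid = 'col_compression'::regclass AND attnum > 0;
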
-- test column compression propagation in rebalance
SELECT shardid INTO moving_shard FROM citus_shards WHERE table_name='col_compression'::regclass AND nodeport=:worker_1_port LIMIT 1;
SELECT citus_move_shard_placement((SELECT * FROM moving_shard), :'public_worker_1_host', :worker_1_port, :'public_worker_2_host', :worker_2_port);
citus_move_shard_placement
---------------------------------------------------------------------

(1 row)

SELECT rebalance_table_shards('col_compression', rebalance_strategy := 'by_shard_count');
NOTICE: Moving shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
rebalance_table_shards
---------------------------------------------------------------------

(1 row)

CALL citus_cleanup_orphaned_shards();
NOTICE: cleaned up 1 orphaned shards
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
column_compression
---------------------------------------------------------------------
{"a p","a p","b ","b "}
{"a p","a p","b ","b "}
(2 rows)

-- test propagation of ALTER TABLE .. ALTER COLUMN .. SET COMPRESSION ..
ALTER TABLE col_compression ALTER COLUMN b SET COMPRESSION pglz;
ALTER TABLE col_compression ALTER COLUMN a SET COMPRESSION default;
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
column_compression
---------------------------------------------------------------------
{"a ","a ","b p","b p"}
{"a ","a ","b p","b p"}
(2 rows)

-- test propagation of ALTER TABLE .. ADD COLUMN .. COMPRESSION ..
ALTER TABLE col_compression ADD COLUMN c TEXT COMPRESSION pglz;
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_compression%' AND attnum > 0 ORDER BY 1
)$$);
column_compression
---------------------------------------------------------------------
{"a ","a ","b p","b p","c p","c p"}
{"a ","a ","b p","b p","c p","c p"}
(2 rows)

-- test attaching to a partitioned table with column compression
CREATE TABLE col_comp_par (a TEXT COMPRESSION pglz, b TEXT) PARTITION BY RANGE (a);
SELECT create_distributed_table('col_comp_par', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE TABLE col_comp_par_1 PARTITION OF col_comp_par FOR VALUES FROM ('abc') TO ('def');
SELECT result AS column_compression FROM run_command_on_workers($$SELECT ARRAY(
SELECT attname || ' ' || attcompression FROM pg_attribute WHERE attrelid::regclass::text LIKE 'pg14.col\_comp\_par\_1\_%' AND attnum > 0 ORDER BY 1
)$$);
column_compression
---------------------------------------------------------------------
{"a p","b "}
{"a p","b "}
(2 rows)

RESET citus.multi_shard_modify_mode;
-- test procedure OUT parameters with procedure pushdown
CREATE TABLE test_proc_table (a int);
create or replace procedure proc_pushdown(dist_key integer, OUT created int4[], OUT res_out text)
language plpgsql
as $$
DECLARE
res INT := 0;
begin
INSERT INTO pg14.test_proc_table VALUES (dist_key);
SELECT count(*) INTO res FROM pg14.test_proc_table;
created := created || res;
PERFORM array_prepend(res, created);
res_out := res::text;
commit;
end;$$;
-- show the behaviour before distributing
CALL proc_pushdown(1, NULL, NULL);
created | res_out
---------------------------------------------------------------------
{1} | 1
(1 row)

CALL proc_pushdown(1, ARRAY[2000,1], 'AAAA');
created | res_out
---------------------------------------------------------------------
{2} | 2
(1 row)

SELECT create_distributed_table('test_proc_table', 'a');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg14.test_proc_table$$)
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_function('proc_pushdown(integer)', 'dist_key', 'test_proc_table' );
create_distributed_function
---------------------------------------------------------------------

(1 row)

-- make sure that metadata is synced; it may take a few seconds
CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 15000)
RETURNS void
LANGUAGE C STRICT
AS 'citus';
SELECT wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------

(1 row)

SELECT bool_and(hasmetadata) FROM pg_dist_node WHERE nodeport IN (:worker_1_port, :worker_2_port);
bool_and
---------------------------------------------------------------------
t
(1 row)

-- still, we do not push down procedures with OUT parameters
SET client_min_messages TO DEBUG1;
CALL proc_pushdown(1, NULL, NULL);
DEBUG: not pushing down procedures with OUT parameters
created | res_out
---------------------------------------------------------------------
{3} | 3
(1 row)

CALL proc_pushdown(1, ARRAY[2000,1], 'AAAA');
DEBUG: not pushing down procedures with OUT parameters
created | res_out
---------------------------------------------------------------------
{4} | 4
(1 row)

RESET client_min_messages;
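-- Background on the DEBUG messages above: PostgreSQL 14 allows procedures to
-- declare OUT parameters, but Citus does not delegate such CALLs to workers and
-- runs them on the coordinator instead, hence "not pushing down procedures with
-- OUT parameters". A sketch of a variant that would stay eligible for pushdown,
-- assuming a procedure without OUT parameters (hypothetical name proc_no_out):
--   CREATE PROCEDURE proc_no_out(dist_key integer) LANGUAGE plpgsql AS $$
--   BEGIN INSERT INTO pg14.test_proc_table VALUES (dist_key); END;$$;
--   SELECT create_distributed_function('proc_no_out(integer)', 'dist_key', 'test_proc_table');
--   CALL proc_no_out(1);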
-- we don't need metadata syncing anymore
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
NOTICE: dropping metadata on the node (localhost,57637)
stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
NOTICE: dropping metadata on the node (localhost,57638)
stop_metadata_sync_to_node
---------------------------------------------------------------------

(1 row)

-- ALTER STATISTICS .. OWNER TO CURRENT_ROLE
CREATE TABLE st1 (a int, b int);
CREATE STATISTICS role_s1 ON a, b FROM st1;
SELECT create_distributed_table('st1','a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE;
SET citus.enable_ddl_propagation TO off; -- for enterprise
CREATE ROLE role_1 WITH LOGIN SUPERUSER;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SET citus.enable_ddl_propagation TO on;
SELECT run_command_on_workers($$CREATE ROLE role_1 WITH LOGIN SUPERUSER;$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(2 rows)

ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE;
SELECT run_command_on_workers($$SELECT rolname FROM pg_roles WHERE oid IN (SELECT stxowner FROM pg_statistic_ext WHERE stxname LIKE 'role\_s1%');$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,postgres)
(localhost,57638,t,postgres)
(2 rows)

SET ROLE role_1;
ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE;
SELECT run_command_on_workers($$SELECT rolname FROM pg_roles WHERE oid IN (SELECT stxowner FROM pg_statistic_ext WHERE stxname LIKE 'role\_s1%');$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,role_1)
(localhost,57638,t,role_1)
(2 rows)

SET ROLE postgres;
ALTER STATISTICS role_s1 OWNER TO CURRENT_USER;
SELECT run_command_on_workers($$SELECT rolname FROM pg_roles WHERE oid IN (SELECT stxowner FROM pg_statistic_ext WHERE stxname LIKE 'role\_s1%');$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,postgres)
(localhost,57638,t,postgres)
(2 rows)

SET ROLE to NONE;
ALTER STATISTICS role_s1 OWNER TO CURRENT_ROLE;
SELECT run_command_on_workers($$SELECT rolname FROM pg_roles WHERE oid IN (SELECT stxowner FROM pg_statistic_ext WHERE stxname LIKE 'role\_s1%');$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,postgres)
(localhost,57638,t,postgres)
(2 rows)

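-- Background on the ownership checks above: PostgreSQL 14 accepts CURRENT_ROLE
-- anywhere CURRENT_USER is accepted, so ALTER STATISTICS .. OWNER TO CURRENT_ROLE
-- resolves to the role in effect for the session (role_1 after SET ROLE role_1,
-- postgres otherwise), and the resulting owner is propagated to the workers, as
-- the run_command_on_workers checks confirm. The same spelling works for other
-- object types too, e.g. (illustrative):
--   ALTER TABLE st1 OWNER TO CURRENT_ROLE;
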
create TABLE test_jsonb_subscript (
id int,
test_json jsonb
);
SELECT create_distributed_table('test_jsonb_subscript', 'id');
create_distributed_table
---------------------------------------------------------------------

(1 row)

insert into test_jsonb_subscript values
(1, '{}'), -- empty jsonb
(2, '{"key": "value"}'); -- jsonb with data
-- update empty jsonb
update test_jsonb_subscript set test_json['a'] = '1' where id = 1;
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"a": 1}
2 | {"key": "value"}
(2 rows)

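-- Background on the syntax above: PostgreSQL 14 adds subscripting for jsonb, so
-- test_json['a'] = '1' in an UPDATE acts like jsonb_set() and test_json['a'] in
-- an expression acts like the -> operator; the remaining tests exercise the same
-- syntax through Citus on the distributed table. The update above could be
-- spelled in the pre-14 way as (illustrative):
--   update test_jsonb_subscript set test_json = jsonb_set(test_json, '{a}', '1') where id = 1;
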
-- update jsonb with some data
update test_jsonb_subscript set test_json['a'] = '1' where id = 2;
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"a": 1}
2 | {"a": 1, "key": "value"}
(2 rows)

-- replace jsonb
update test_jsonb_subscript set test_json['a'] = '"test"';
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"a": "test"}
2 | {"a": "test", "key": "value"}
(2 rows)

-- replace by object
update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb;
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"a": {"b": 1}}
2 | {"a": {"b": 1}, "key": "value"}
(2 rows)

-- replace by array
update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb;
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"a": [1, 2, 3]}
2 | {"a": [1, 2, 3], "key": "value"}
(2 rows)

-- use jsonb subscripting in where clause
select * from test_jsonb_subscript where test_json['key'] = '"value"' ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
2 | {"a": [1, 2, 3], "key": "value"}
(1 row)

select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"' ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
(0 rows)

select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"' ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
(0 rows)

-- NULL
update test_jsonb_subscript set test_json[NULL] = '1';
ERROR: jsonb subscript in assignment must not be null
CONTEXT: while executing command on localhost:xxxxx
update test_jsonb_subscript set test_json['another_key'] = NULL;
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"a": [1, 2, 3], "another_key": null}
2 | {"a": [1, 2, 3], "key": "value", "another_key": null}
(2 rows)

-- NULL as jsonb source
insert into test_jsonb_subscript values (3, NULL);
update test_jsonb_subscript set test_json['a'] = '1' where id = 3;
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"a": [1, 2, 3], "another_key": null}
2 | {"a": [1, 2, 3], "key": "value", "another_key": null}
3 | {"a": 1}
(3 rows)

update test_jsonb_subscript set test_json = NULL where id = 3;
update test_jsonb_subscript set test_json[0] = '1';
select * from test_jsonb_subscript ORDER BY 1,2;
id | test_json
---------------------------------------------------------------------
1 | {"0": 1, "a": [1, 2, 3], "another_key": null}
2 | {"0": 1, "a": [1, 2, 3], "key": "value", "another_key": null}
3 | [1]
(3 rows)

-- JOIN ALIAS
CREATE TABLE J1_TBL (
i integer,
j integer,
t text
);
CREATE TABLE J2_TBL (
i integer,
k integer
);
INSERT INTO J1_TBL VALUES (1, 4, 'one');
INSERT INTO J1_TBL VALUES (2, 3, 'two');
INSERT INTO J1_TBL VALUES (3, 2, 'three');
INSERT INTO J1_TBL VALUES (4, 1, 'four');
INSERT INTO J1_TBL VALUES (5, 0, 'five');
INSERT INTO J1_TBL VALUES (6, 6, 'six');
INSERT INTO J1_TBL VALUES (7, 7, 'seven');
INSERT INTO J1_TBL VALUES (8, 8, 'eight');
INSERT INTO J1_TBL VALUES (0, NULL, 'zero');
INSERT INTO J2_TBL VALUES (1, -1);
INSERT INTO J2_TBL VALUES (2, 2);
INSERT INTO J2_TBL VALUES (3, -3);
INSERT INTO J2_TBL VALUES (2, 4);
INSERT INTO J2_TBL VALUES (5, -5);
INSERT INTO J2_TBL VALUES (5, -5);
INSERT INTO J2_TBL VALUES (0, NULL);
SELECT create_distributed_table('J1_TBL','i');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg14.j1_tbl$$)
create_distributed_table
---------------------------------------------------------------------

(1 row)

SELECT create_distributed_table('J2_TBL','i');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg14.j2_tbl$$)
create_distributed_table
---------------------------------------------------------------------

(1 row)

-- test join using aliases
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) WHERE J1_TBL.t = 'one' ORDER BY 1,2,3,4; -- ok
i | j | t | k
---------------------------------------------------------------------
1 | 4 | one | -1
(1 row)

SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one' ORDER BY 1,2,3,4; -- ok
i | j | t | k
---------------------------------------------------------------------
1 | 4 | one | -1
(1 row)

SELECT * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t = 'one' ORDER BY 1,2,3,4; -- error
ERROR: invalid reference to FROM-clause entry for table "j1_tbl"
HINT: There is an entry for table "j1_tbl", but it cannot be referenced from this part of the query.
SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i = 1 ORDER BY 1,2,3,4; -- ok
i | j | t | k
---------------------------------------------------------------------
1 | 4 | one | -1
(1 row)

SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.t = 'one' ORDER BY 1,2,3,4; -- error
ERROR: column x.t does not exist
SELECT * FROM (J1_TBL JOIN J2_TBL USING (i) AS x) AS xx WHERE x.i = 1 ORDER BY 1,2,3,4; -- error (XXX could use better hint)
ERROR: missing FROM-clause entry for table "x"
SELECT * FROM J1_TBL a1 JOIN J2_TBL a2 USING (i) AS a1 ORDER BY 1,2,3,4; -- error
ERROR: table name "a1" specified more than once
SELECT x.* FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one' ORDER BY 1;
i
---------------------------------------------------------------------
1
(1 row)

SELECT ROW(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one' ORDER BY 1;
row
---------------------------------------------------------------------
(1)
(1 row)

SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i > 1 ORDER BY 1,2,3,4;
i | j | t | k
---------------------------------------------------------------------
2 | 3 | two | 2
2 | 3 | two | 4
3 | 2 | three | -3
5 | 0 | five | -5
5 | 0 | five | -5
(5 rows)

-- ORDER BY is not supported for json; this returns only 1 row, so it is okay.
SELECT row_to_json(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one';
row_to_json
---------------------------------------------------------------------
{"f1":1}
(1 row)

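-- Background on the queries above: PostgreSQL 14 allows an alias after
-- JOIN .. USING (i) AS x, and that alias exposes only the merged USING
-- column(s), which is why x.i works while x.t does not exist; the same join
-- aliases are planned by Citus on the distributed tables as shown. Minimal
-- sketch with hypothetical local tables:
--   SELECT x.i FROM t_left JOIN t_right USING (i) AS x;
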
-- we don't support REINDEX TABLE queries on distributed partitioned tables
CREATE TABLE dist_part_table (a int) PARTITION BY RANGE (a);
CREATE TABLE dist_part_table_1 PARTITION OF dist_part_table FOR VALUES FROM (1) TO (5);
CREATE TABLE dist_part_table_2 PARTITION OF dist_part_table FOR VALUES FROM (5) TO (10);
SELECT create_distributed_table('dist_part_table', 'a');
create_distributed_table
---------------------------------------------------------------------

(1 row)

CREATE INDEX dist_part_idx ON dist_part_table(a);
REINDEX TABLE dist_part_table;
ERROR: REINDEX TABLE queries on distributed partitioned tables are not supported
-- but we support REINDEXing partitions
REINDEX TABLE dist_part_table_1;
set client_min_messages to error;
drop schema pg14 cascade;