citus/src/test/regress/expected/pg15.out

--
-- PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
\gset
\if :server_version_ge_15
\else
\q
\endif
CREATE SCHEMA pg15;
SET search_path TO pg15;
SET citus.next_shard_id TO 960000;
SET citus.shard_count TO 4;
--
-- In PG15, there is an added option to use ICU as global locale provider.
-- pg_collation has three locale-related fields: collcollate and collctype,
-- which are libc-related fields, and a new one colliculocale, which is the
-- ICU-related field. Only the libc-related fields or the ICU-related field
-- is set, never both.
-- Relevant PG commits:
-- f2553d43060edb210b36c63187d52a632448e1d2
-- 54637508f87bd5f07fb9406bac6b08240283be3b
--
-- fail, needs "locale"
CREATE COLLATION german_phonebook_test (provider = icu, lc_collate = 'de-u-co-phonebk');
ERROR: parameter "locale" must be specified
-- fail, needs "locale"
CREATE COLLATION german_phonebook_test (provider = icu, lc_collate = 'de-u-co-phonebk', lc_ctype = 'de-u-co-phonebk');
ERROR: parameter "locale" must be specified
-- works
CREATE COLLATION german_phonebook_test (provider = icu, locale = 'de-u-co-phonebk');
-- with icu provider, colliculocale will be set, collcollate and collctype will be null
SELECT result FROM run_command_on_all_nodes('
SELECT collcollate FROM pg_collation WHERE collname = ''german_phonebook_test'';
');
result
---------------------------------------------------------------------
(3 rows)
SELECT result FROM run_command_on_all_nodes('
SELECT collctype FROM pg_collation WHERE collname = ''german_phonebook_test'';
');
result
---------------------------------------------------------------------
(3 rows)
SELECT result FROM run_command_on_all_nodes('
SELECT colliculocale FROM pg_collation WHERE collname = ''german_phonebook_test'';
');
result
---------------------------------------------------------------------
de-u-co-phonebk
de-u-co-phonebk
de-u-co-phonebk
(3 rows)
-- with non-icu provider, colliculocale will be null, collcollate and collctype will be set
CREATE COLLATION default_provider (provider = libc, lc_collate = "POSIX", lc_ctype = "POSIX");
SELECT result FROM run_command_on_all_nodes('
SELECT collcollate FROM pg_collation WHERE collname = ''default_provider'';
');
result
---------------------------------------------------------------------
POSIX
POSIX
POSIX
(3 rows)
SELECT result FROM run_command_on_all_nodes('
SELECT collctype FROM pg_collation WHERE collname = ''default_provider'';
');
result
---------------------------------------------------------------------
POSIX
POSIX
POSIX
(3 rows)
SELECT result FROM run_command_on_all_nodes('
SELECT colliculocale FROM pg_collation WHERE collname = ''default_provider'';
');
result
---------------------------------------------------------------------
(3 rows)
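-- As an aside (illustration only, not executed as part of this test), the ICU
-- collation created above could be used to sort German names phonebook-style:
--   SELECT name FROM (VALUES ('Göbel'), ('Goethe'), ('Goldmann')) t(name)
--   ORDER BY name COLLATE german_phonebook_test;
-- where the phonebook variant sorts 'Göbel' as if it were spelled 'Goebel'.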
--
-- In PG15, renaming a trigger on a partitioned table
-- recurses to renaming the triggers on its partitions as well.
-- Here we test that distributed triggers behave the same way.
-- Relevant PG commit:
-- 80ba4bb383538a2ee846fece6a7b8da9518b6866
--
SET citus.enable_unsafe_triggers TO true;
CREATE TABLE sale(
sale_date date not null,
state_code text,
product_sku text,
units integer)
PARTITION BY list (state_code);
ALTER TABLE sale ADD CONSTRAINT sale_pk PRIMARY KEY (state_code, sale_date);
CREATE TABLE sale_newyork PARTITION OF sale FOR VALUES IN ('NY');
CREATE TABLE sale_california PARTITION OF sale FOR VALUES IN ('CA');
CREATE TABLE record_sale(
operation_type text not null,
product_sku text,
state_code text,
units integer,
PRIMARY KEY(state_code, product_sku, operation_type, units));
SELECT create_distributed_table('sale', 'state_code');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('record_sale', 'state_code', colocate_with := 'sale');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION record_sale()
RETURNS trigger
AS $$
BEGIN
INSERT INTO pg15.record_sale(operation_type, product_sku, state_code, units)
VALUES (TG_OP, NEW.product_sku, NEW.state_code, NEW.units);
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER record_sale_trigger
AFTER INSERT OR UPDATE OR DELETE ON sale
FOR EACH ROW EXECUTE FUNCTION pg15.record_sale();
CREATE VIEW sale_triggers AS
SELECT tgname, tgrelid::regclass, tgenabled
FROM pg_trigger
WHERE tgrelid::regclass::text like 'sale%'
ORDER BY 1, 2;
SELECT * FROM sale_triggers ORDER BY 1, 2;
tgname | tgrelid | tgenabled
---------------------------------------------------------------------
record_sale_trigger | sale | O
record_sale_trigger | sale_newyork | O
record_sale_trigger | sale_california | O
truncate_trigger_xxxxxxx | sale | O
truncate_trigger_xxxxxxx | sale_california | O
truncate_trigger_xxxxxxx | sale_newyork | O
(6 rows)
ALTER TRIGGER "record_sale_trigger" ON "pg15"."sale" RENAME TO "new_record_sale_trigger";
SELECT * FROM sale_triggers ORDER BY 1, 2;
tgname | tgrelid | tgenabled
---------------------------------------------------------------------
new_record_sale_trigger | sale | O
new_record_sale_trigger | sale_newyork | O
new_record_sale_trigger | sale_california | O
truncate_trigger_xxxxxxx | sale | O
truncate_trigger_xxxxxxx | sale_california | O
truncate_trigger_xxxxxxx | sale_newyork | O
(6 rows)
-- test that we can't rename a distributed clone trigger
ALTER TRIGGER "new_record_sale_trigger" ON "pg15"."sale_newyork" RENAME TO "another_trigger_name";
ERROR: cannot rename trigger "new_record_sale_trigger" on table "sale_newyork"
HINT: Rename the trigger on the partitioned table "sale" instead.
--
-- In PG15, For GENERATED columns, all dependencies of the generation
-- expression are recorded as NORMAL dependencies of the column itself.
-- This requires CASCADE to drop generated cols with the original col.
-- Test this behavior in distributed table, specifically with
-- undistribute_table within a transaction.
-- Relevant PG Commit: cb02fcb4c95bae08adaca1202c2081cfc81a28b5
--
CREATE TABLE generated_stored_ref (
col_1 int,
col_2 int,
col_3 int generated always as (col_1+col_2) stored,
col_4 int,
col_5 int generated always as (col_4*2-col_1) stored
);
SELECT create_reference_table ('generated_stored_ref');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- populate the table
INSERT INTO generated_stored_ref (col_1, col_4) VALUES (1,2), (11,12);
INSERT INTO generated_stored_ref (col_1, col_2, col_4) VALUES (100,101,102), (200,201,202);
SELECT * FROM generated_stored_ref ORDER BY 1,2,3,4,5;
col_1 | col_2 | col_3 | col_4 | col_5
---------------------------------------------------------------------
1 | | | 2 | 3
11 | | | 12 | 13
100 | 101 | 201 | 102 | 104
200 | 201 | 401 | 202 | 204
(4 rows)
-- fails, CASCADE must be specified
-- will test CASCADE inside the transaction
ALTER TABLE generated_stored_ref DROP COLUMN col_1;
ERROR: cannot drop column col_1 of table generated_stored_ref because other objects depend on it
DETAIL: column col_3 of table generated_stored_ref depends on column col_1 of table generated_stored_ref
column col_5 of table generated_stored_ref depends on column col_1 of table generated_stored_ref
HINT: Use DROP ... CASCADE to drop the dependent objects too.
BEGIN;
-- drops col_1, col_3, col_5
ALTER TABLE generated_stored_ref DROP COLUMN col_1 CASCADE;
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to column col_3 of table generated_stored_ref
drop cascades to column col_5 of table generated_stored_ref
ALTER TABLE generated_stored_ref DROP COLUMN col_4;
-- show that undistribute_table works fine
SELECT undistribute_table('generated_stored_ref');
NOTICE: creating a new table for pg15.generated_stored_ref
NOTICE: moving the data of pg15.generated_stored_ref
NOTICE: dropping the old pg15.generated_stored_ref
NOTICE: renaming the new table to pg15.generated_stored_ref
undistribute_table
---------------------------------------------------------------------
(1 row)
INSERT INTO generated_stored_ref VALUES (5);
SELECT * FROM generated_stored_REF ORDER BY 1;
col_2
---------------------------------------------------------------------
5
101
201


(5 rows)
ROLLBACK;
SELECT undistribute_table('generated_stored_ref');
NOTICE: creating a new table for pg15.generated_stored_ref
NOTICE: moving the data of pg15.generated_stored_ref
NOTICE: dropping the old pg15.generated_stored_ref
NOTICE: renaming the new table to pg15.generated_stored_ref
undistribute_table
---------------------------------------------------------------------
(1 row)
--
-- In PG15, there is a new command called MERGE
-- It is currently not supported for Citus non-local tables
-- Test the behavior with various commands with Citus table types
-- Relevant PG Commit: 7103ebb7aae8ab8076b7e85f335ceb8fe799097c
--
CREATE TABLE tbl1
(
x INT
);
CREATE TABLE tbl2
(
x INT
);
-- on local tables works fine
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
-- add coordinator node as a worker
SET client_min_messages to ERROR;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
-- one table is Citus local table, fails
SELECT citus_add_local_table_to_metadata('tbl1');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
SELECT undistribute_table('tbl1');
NOTICE: creating a new table for pg15.tbl1
NOTICE: moving the data of pg15.tbl1
NOTICE: dropping the old pg15.tbl1
NOTICE: renaming the new table to pg15.tbl1
undistribute_table
---------------------------------------------------------------------
(1 row)
-- the other table is Citus local table, fails
SELECT citus_add_local_table_to_metadata('tbl2');
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
-- one table is reference, the other local, not supported
SELECT create_reference_table('tbl2');
create_reference_table
---------------------------------------------------------------------
(1 row)
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is not supported on reference tables yet
-- now, both are reference, still not supported
SELECT create_reference_table('tbl1');
create_reference_table
---------------------------------------------------------------------
(1 row)
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is not supported on reference tables yet
-- now, both distributed, still not supported
SELECT undistribute_table('tbl1');
NOTICE: creating a new table for pg15.tbl1
NOTICE: moving the data of pg15.tbl1
NOTICE: dropping the old pg15.tbl1
NOTICE: renaming the new table to pg15.tbl1
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT undistribute_table('tbl2');
NOTICE: creating a new table for pg15.tbl2
NOTICE: moving the data of pg15.tbl2
NOTICE: dropping the old pg15.tbl2
NOTICE: renaming the new table to pg15.tbl2
undistribute_table
---------------------------------------------------------------------
(1 row)
-- Make sure that we allow foreign key columns on local tables added to
-- metadata to have SET NULL/DEFAULT on a per-column basis.
CREATE TABLE PKTABLE_local (tid int, id int, PRIMARY KEY (tid, id));
CREATE TABLE FKTABLE_local (
tid int, id int,
fk_id_del_set_null int,
fk_id_del_set_default int DEFAULT 0,
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE_local ON DELETE SET NULL (fk_id_del_set_null),
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE_local ON DELETE SET DEFAULT (fk_id_del_set_default)
);
SELECT citus_add_local_table_to_metadata('FKTABLE_local', cascade_via_foreign_keys=>true);
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
-- show that the definition is expected
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'FKTABLE_local'::regclass::oid ORDER BY oid;
pg_get_constraintdef
---------------------------------------------------------------------
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable_local(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable_local(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
(2 rows)
\c - - - :worker_1_port
SET search_path TO pg15;
-- show that the definition is expected on the worker as well
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'FKTABLE_local'::regclass::oid ORDER BY oid;
pg_get_constraintdef
---------------------------------------------------------------------
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable_local(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable_local(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
(2 rows)
-- also, make sure that it works as expected
INSERT INTO PKTABLE_local VALUES (1, 0), (1, 1), (1, 2);
INSERT INTO FKTABLE_local VALUES
(1, 1, 1, NULL),
(1, 2, NULL, 2);
DELETE FROM PKTABLE_local WHERE id = 1 OR id = 2;
SELECT * FROM FKTABLE_local ORDER BY id;
tid | id | fk_id_del_set_null | fk_id_del_set_default
---------------------------------------------------------------------
1 | 1 | |
1 | 2 | | 0
(2 rows)
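-- note: deleting the referenced rows set fk_id_del_set_null to NULL in the
-- first row and fk_id_del_set_default back to its DEFAULT 0 in the second,
-- while tid stayed intact; that per-column behavior is what the PG15 column
-- lists in the ON DELETE actions provide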
\c - - - :master_port
SET search_path TO pg15;
SET client_min_messages to ERROR;
DROP TABLE FKTABLE_local, PKTABLE_local;
RESET client_min_messages;
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT create_distributed_table('tbl1', 'x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('tbl2', 'x');
create_distributed_table
---------------------------------------------------------------------
(1 row)
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns
-- also, not inside subqueries & ctes
WITH targq AS (
SELECT * FROM tbl2
)
MERGE INTO tbl1 USING targq ON (true)
WHEN MATCHED THEN DELETE;
ERROR: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns
WITH foo AS (
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE
) SELECT * FROM foo;
ERROR: MERGE not supported in WITH query
COPY (
MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE
) TO stdout;
ERROR: MERGE not supported in COPY
MERGE INTO tbl1 t
USING tbl2
ON (true)
WHEN MATCHED THEN
DO NOTHING;
ERROR: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns
MERGE INTO tbl1 t
USING tbl2
ON (true)
WHEN MATCHED THEN
UPDATE SET x = (SELECT count(*) FROM tbl2);
ERROR: updating the distribution column is not allowed in MERGE actions
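-- for reference, a sketch (not run here) of a MERGE that the error messages
-- above describe as supported: co-located distributed tables joined on their
-- distribution column, e.g.
--   MERGE INTO tbl1 t USING tbl2 s ON (t.x = s.x)
--   WHEN MATCHED THEN DELETE;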
-- test numeric types with negative scale
CREATE TABLE numeric_negative_scale(numeric_column numeric(3,-1), orig_value int);
INSERT into numeric_negative_scale SELECT x,x FROM generate_series(111, 115) x;
-- verify that we can not distribute by a column that has numeric type with negative scale
SELECT create_distributed_table('numeric_negative_scale','numeric_column');
ERROR: cannot distribute relation: numeric_negative_scale
DETAIL: Distribution column must not use numeric type with negative scale
-- However, we can distribute by other columns
SELECT create_distributed_table('numeric_negative_scale','orig_value');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg15.numeric_negative_scale$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Verify that we can not change the distribution column to the numeric column
SELECT alter_distributed_table('numeric_negative_scale',
distribution_column := 'numeric_column');
NOTICE: creating a new table for pg15.numeric_negative_scale
ERROR: cannot distribute relation: numeric_negative_scale_xxxxxx
DETAIL: Distribution column must not use numeric type with negative scale
SELECT * FROM numeric_negative_scale ORDER BY 1,2;
numeric_column | orig_value
---------------------------------------------------------------------
110 | 111
110 | 112
110 | 113
110 | 114
120 | 115
(5 rows)
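-- note: numeric(3,-1) rounds input to the nearest multiple of 10, which is why
-- the inserted values 111..114 read back as 110 and 115 as 120 above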
-- verify that numeric types with scale greater than precision are also ok
-- a precision of 2 and a scale of 3 mean that all the numbers must be less than 10^-1, i.e. of the form 0.0XY
CREATE TABLE numeric_scale_gt_precision(numeric_column numeric(2,3));
SELECT * FROM create_distributed_table('numeric_scale_gt_precision','numeric_column');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO numeric_scale_gt_precision SELECT x FROM generate_series(0.01234, 0.09, 0.005) x;
-- verify that only 2 significant digits are stored and the rest are rounded away.
SELECT * FROM numeric_scale_gt_precision ORDER BY 1;
numeric_column
---------------------------------------------------------------------
0.012
0.017
0.022
0.027
0.032
0.037
0.042
0.047
0.052
0.057
0.062
0.067
0.072
0.077
0.082
0.087
(16 rows)
-- verify we can route queries to the right shards
SELECT * FROM numeric_scale_gt_precision WHERE numeric_column=0.027;
numeric_column
---------------------------------------------------------------------
0.027
(1 row)
-- test repartition joins on tables distributed on numeric types with negative scale
CREATE TABLE numeric_repartition_first(id int, data int, numeric_column numeric(3,-1));
CREATE TABLE numeric_repartition_second(id int, data int, numeric_column numeric(3,-1));
-- populate tables
INSERT INTO numeric_repartition_first SELECT x, x, x FROM generate_series (100, 115) x;
INSERT INTO numeric_repartition_second SELECT x, x, x FROM generate_series (100, 115) x;
-- Run some queries before distributing the tables to see results in vanilla PG
SELECT count(*)
FROM numeric_repartition_first f,
numeric_repartition_second s
WHERE f.id = s.numeric_column;
count
---------------------------------------------------------------------
15
(1 row)
SELECT count(*)
FROM numeric_repartition_first f,
numeric_repartition_second s
WHERE f.numeric_column = s.numeric_column;
count
---------------------------------------------------------------------
126
(1 row)
-- distribute tables and re-run the same queries
SELECT * FROM create_distributed_table('numeric_repartition_first','id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg15.numeric_repartition_first$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM create_distributed_table('numeric_repartition_second','id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg15.numeric_repartition_second$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET citus.enable_repartition_joins TO 1;
SELECT count(*)
FROM numeric_repartition_first f,
numeric_repartition_second s
WHERE f.id = s.numeric_column;
ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
-- show that the same query works if we use an int column instead of a numeric one in the join clause
SELECT count(*)
FROM numeric_repartition_first f,
numeric_repartition_second s
WHERE f.id = s.data;
count
---------------------------------------------------------------------
16
(1 row)
SELECT count(*)
FROM numeric_repartition_first f,
numeric_repartition_second s
WHERE f.numeric_column = s.numeric_column;
count
---------------------------------------------------------------------
126
(1 row)
-- test new regex functions
-- print order comments that contain the word `fluffily` at least twice
SELECT o_comment FROM public.orders WHERE regexp_count(o_comment, 'FluFFily', 1, 'i')>=2 ORDER BY 1;
o_comment
---------------------------------------------------------------------
al, bold deposits cajole fluffily fluffily final foxes. pending ideas beli
ly regular packages are fluffily even ideas. fluffily final
ng instructions integrate fluffily among the fluffily silent accounts. bli
ructions wake fluffily fluffily final gifts! furiou
s boost blithely fluffily idle ideas? fluffily even pin
(5 rows)
-- print the same items using a different regexp function
SELECT o_comment FROM public.orders WHERE regexp_like(o_comment, 'fluffily.*fluffily') ORDER BY 1;
o_comment
---------------------------------------------------------------------
al, bold deposits cajole fluffily fluffily final foxes. pending ideas beli
ly regular packages are fluffily even ideas. fluffily final
ng instructions integrate fluffily among the fluffily silent accounts. bli
ructions wake fluffily fluffily final gifts! furiou
s boost blithely fluffily idle ideas? fluffily even pin
(5 rows)
-- print the position at which a match containing two `fluffily`s starts in the comment
SELECT o_comment, regexp_instr(o_comment, 'fluffily.*(fluffily)') FROM public.orders ORDER BY 2 desc LIMIT 5;
o_comment | regexp_instr
---------------------------------------------------------------------
ng instructions integrate fluffily among the fluffily silent accounts. bli | 27
al, bold deposits cajole fluffily fluffily final foxes. pending ideas beli | 26
ly regular packages are fluffily even ideas. fluffily final | 25
s boost blithely fluffily idle ideas? fluffily even pin | 18
ructions wake fluffily fluffily final gifts! furiou | 15
(5 rows)
-- print the substrings spanning from the first `fluffily` to the second
SELECT regexp_substr(o_comment, 'fluffily.*fluffily') FROM public.orders ORDER BY 1 LIMIT 5;
regexp_substr
---------------------------------------------------------------------
fluffily among the fluffily
fluffily even ideas. fluffily
fluffily fluffily
fluffily fluffily
fluffily idle ideas? fluffily
(5 rows)
-- replace second `fluffily` with `silkily`
SELECT regexp_replace(o_comment, 'fluffily', 'silkily', 1, 2) FROM public.orders WHERE regexp_like(o_comment, 'fluffily.*fluffily') ORDER BY 1 desc;
regexp_replace
---------------------------------------------------------------------
s boost blithely fluffily idle ideas? silkily even pin
ructions wake fluffily silkily final gifts! furiou
ng instructions integrate fluffily among the silkily silent accounts. bli
ly regular packages are fluffily even ideas. silkily final
al, bold deposits cajole fluffily silkily final foxes. pending ideas beli
(5 rows)
-- test new COPY features
-- COPY TO statements with text format and headers
CREATE TABLE copy_test(id int, data int);
SELECT create_distributed_table('copy_test', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO copy_test SELECT x, x FROM generate_series(1,100) x;
COPY copy_test TO :'temp_dir''copy_test.txt' WITH ( HEADER true, FORMAT text);
-- Create another distributed table with different column names and test COPY FROM with header match
CREATE TABLE copy_test2(id int, data_ int);
SELECT create_distributed_table('copy_test2', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
COPY copy_test2 FROM :'temp_dir''copy_test.txt' WITH ( HEADER match, FORMAT text);
ERROR: column name mismatch in header line field 2: got "data", expected "data_"
CONTEXT: COPY copy_test2, line 1: "id data"
-- verify that the command works if we rename the column
ALTER TABLE copy_test2 RENAME COLUMN data_ TO data;
COPY copy_test2 FROM :'temp_dir''copy_test.txt' WITH ( HEADER match, FORMAT text);
SELECT count(*)=100 FROM copy_test2;
?column?
---------------------------------------------------------------------
t
(1 row)
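-- note: HEADER match only succeeds when the header line's column names equal
-- the table's column names, which is why the COPY works after the rename above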
--
-- In PG15, unlogged sequences are supported
-- we support this for distributed sequences as well
--
CREATE SEQUENCE seq1;
CREATE UNLOGGED SEQUENCE "pg15"."seq 2";
-- first, test that sequence persistence is distributed correctly
-- when the sequence is distributed
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('seq1', 'seq 2') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
seq 2 | unlogged
seq1 | logged
(2 rows)
CREATE TABLE "seq test"(a int, b int default nextval ('seq1'), c int default nextval ('"pg15"."seq 2"'));
SELECT create_distributed_table('"pg15"."seq test"','a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
\c - - - :worker_1_port
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('seq1', 'seq 2') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
seq 2 | unlogged
seq1 | logged
(2 rows)
\c - - - :master_port
SET search_path TO pg15;
-- now, check that we can change sequence persistence using ALTER SEQUENCE
ALTER SEQUENCE seq1 SET UNLOGGED;
-- use IF EXISTS
ALTER SEQUENCE IF EXISTS "seq 2" SET LOGGED;
-- check non-existent sequence as well
ALTER SEQUENCE seq_non_exists SET LOGGED;
ERROR: relation "seq_non_exists" does not exist
ALTER SEQUENCE IF EXISTS seq_non_exists SET LOGGED;
NOTICE: relation "seq_non_exists" does not exist, skipping
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('seq1', 'seq 2') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
seq 2 | logged
seq1 | unlogged
(2 rows)
\c - - - :worker_1_port
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('seq1', 'seq 2') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
seq 2 | logged
seq1 | unlogged
(2 rows)
\c - - - :master_port
SET search_path TO pg15;
-- now, check that we can change sequence persistence using ALTER TABLE
ALTER TABLE seq1 SET LOGGED;
ALTER TABLE "seq 2" SET UNLOGGED;
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('seq1', 'seq 2') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
seq 2 | unlogged
seq1 | logged
(2 rows)
\c - - - :worker_1_port
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('seq1', 'seq 2') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
seq 2 | unlogged
seq1 | logged
(2 rows)
\c - - - :master_port
SET search_path TO pg15;
-- An identity/serial sequence now automatically gets and follows the
-- persistence level (logged/unlogged) of its owning table.
-- Test this behavior as well
CREATE UNLOGGED TABLE test(a bigserial, b bigserial);
SELECT create_distributed_table('test', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- show that the associated sequences are unlogged
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('test_a_seq', 'test_b_seq') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
test_a_seq | unlogged
test_b_seq | unlogged
(2 rows)
\c - - - :worker_1_port
SELECT relname,
CASE relpersistence
WHEN 'u' THEN 'unlogged'
WHEN 'p' then 'logged'
ELSE 'unknown'
END AS logged_info
FROM pg_class
WHERE relname IN ('test_a_seq', 'test_b_seq') AND relnamespace='pg15'::regnamespace
ORDER BY relname;
relname | logged_info
---------------------------------------------------------------------
test_a_seq | unlogged
test_b_seq | unlogged
(2 rows)
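-- note (assumption, not exercised in this test): since identity/serial
-- sequences follow the persistence of their owning table in PG15, an
--   ALTER TABLE test SET LOGGED;
-- should switch the owned sequences back to logged as well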
\c - - - :master_port
SET search_path TO pg15;
-- allow foreign key columns to have SET NULL/DEFAULT on a per-column basis
-- currently only reference tables can support that
CREATE TABLE PKTABLE (tid int, id int, PRIMARY KEY (tid, id));
CREATE TABLE FKTABLE (
tid int, id int,
fk_id_del_set_null int,
fk_id_del_set_default int DEFAULT 0,
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE ON DELETE SET NULL (fk_id_del_set_null),
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE ON DELETE SET DEFAULT (fk_id_del_set_default)
);
SELECT create_reference_table('PKTABLE');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- errors for now; Citus could relax this constraint in the future
SELECT create_distributed_table('FKTABLE', 'tid');
ERROR: cannot create foreign key constraint
DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation when distribution key is included in the foreign key constraint
-- with reference tables it should all work fine
SELECT create_reference_table('FKTABLE');
create_reference_table
---------------------------------------------------------------------
(1 row)
-- show that the definition is expected
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
pg_get_constraintdef
---------------------------------------------------------------------
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
(2 rows)
\c - - - :worker_1_port
SET search_path TO pg15;
-- show that the definition is expected on the worker as well
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
pg_get_constraintdef
---------------------------------------------------------------------
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
(2 rows)
-- also, make sure that it works as expected
INSERT INTO PKTABLE VALUES (1, 0), (1, 1), (1, 2);
INSERT INTO FKTABLE VALUES
(1, 1, 1, NULL),
(1, 2, NULL, 2);
DELETE FROM PKTABLE WHERE id = 1 OR id = 2;
SELECT * FROM FKTABLE ORDER BY id;
tid | id | fk_id_del_set_null | fk_id_del_set_default
---------------------------------------------------------------------
1 | 1 | |
1 | 2 | | 0
(2 rows)
\c - - - :master_port
SET search_path TO pg15;
-- test NULL NOT DISTINCT clauses
-- set the next shard id so that the error messages are easier to maintain
SET citus.next_shard_id TO 960150;
CREATE TABLE null_distinct_test(id INT, c1 INT, c2 INT, c3 VARCHAR(10)) ;
SELECT create_distributed_table('null_distinct_test', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE UNIQUE INDEX idx1_null_distinct_test ON null_distinct_test(id, c1) NULLS DISTINCT ;
CREATE UNIQUE INDEX idx2_null_distinct_test ON null_distinct_test(id, c2) NULLS NOT DISTINCT ;
-- populate with some initial data
INSERT INTO null_distinct_test VALUES (1, 1, 1, 'data1') ;
INSERT INTO null_distinct_test VALUES (1, 2, NULL, 'data2') ;
INSERT INTO null_distinct_test VALUES (1, NULL, 3, 'data3') ;
-- should fail as we already have a null value in c2 column
INSERT INTO null_distinct_test VALUES (1, NULL, NULL, 'data4') ;
ERROR: duplicate key value violates unique constraint "idx2_null_distinct_test_960150"
DETAIL: Key (id, c2)=(1, null) already exists.
CONTEXT: while executing command on localhost:xxxxx
INSERT INTO null_distinct_test VALUES (1, NULL, NULL, 'data4') ON CONFLICT DO NOTHING;
INSERT INTO null_distinct_test VALUES (1, NULL, NULL, 'data4') ON CONFLICT (id, c2) DO UPDATE SET c2=100 RETURNING *;
id | c1 | c2 | c3
---------------------------------------------------------------------
1 | 2 | 100 | data2
(1 row)
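-- note: the (1, NULL) insert conflicts with the existing (1, NULL) key because
-- idx2_null_distinct_test treats NULLs as equal, so DO UPDATE modified the
-- existing 'data2' row as shown above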
-- should not fail as null values are distinct for c1 column
INSERT INTO null_distinct_test VALUES (1, NULL, 5, 'data5') ;
-- test that unique constraints also work properly
-- since we have multiple (1,NULL) pairs for columns (id,c1) the first will work, second will fail
ALTER TABLE null_distinct_test ADD CONSTRAINT uniq_distinct_c1 UNIQUE NULLS DISTINCT (id,c1);
ALTER TABLE null_distinct_test ADD CONSTRAINT uniq_c1 UNIQUE NULLS NOT DISTINCT (id,c1);
ERROR: could not create unique index "uniq_c1_960150"
DETAIL: Key (id, c1)=(1, null) is duplicated.
CONTEXT: while executing command on localhost:xxxxx
-- show all records in the table for fact checking
SELECT * FROM null_distinct_test ORDER BY c3;
id | c1 | c2 | c3
---------------------------------------------------------------------
1 | 1 | 1 | data1
1 | 2 | 100 | data2
1 | | 3 | data3
1 | | 5 | data5
(4 rows)
-- test unique nulls not distinct constraints on a reference table
CREATE TABLE reference_uniq_test (
x int, y int,
UNIQUE NULLS NOT DISTINCT (x, y)
);
SELECT create_reference_table('reference_uniq_test');
create_reference_table
---------------------------------------------------------------------
(1 row)
INSERT INTO reference_uniq_test VALUES (1, 1), (1, NULL), (NULL, 1);
-- the following will fail
INSERT INTO reference_uniq_test VALUES (1, NULL);
ERROR: duplicate key value violates unique constraint "reference_uniq_test_x_y_key_960154"
DETAIL: Key (x, y)=(1, null) already exists.
CONTEXT: while executing command on localhost:xxxxx
--
-- PG15 introduces CLUSTER command support for partitioned tables. However, similar to
-- CLUSTER commands with no table name, these queries can not be run inside a transaction
-- block. Therefore, we do not propagate such queries.
--
-- Should print a warning that it will not be propagated to worker nodes.
CLUSTER sale USING sale_pk;
WARNING: not propagating CLUSTER command for partitioned table to worker nodes
HINT: Provide a child partition table names in order to CLUSTER distributed partitioned tables.
-- verify that we can cluster the partition tables only when replication factor is 1
CLUSTER sale_newyork USING sale_newyork_pkey;
ERROR: modifications on partitions when replication factor is greater than 1 is not supported
HINT: Run the query on the parent table "sale" instead.
-- create a new partitioned table with shard replication factor 1
SET citus.shard_replication_factor = 1;
CREATE TABLE sale_repl_factor_1 ( LIKE sale )
PARTITION BY list (state_code);
ALTER TABLE sale_repl_factor_1 ADD CONSTRAINT sale_repl_factor_1_pk PRIMARY KEY (state_code, sale_date);
CREATE TABLE sale_newyork_repl_factor_1 PARTITION OF sale_repl_factor_1 FOR VALUES IN ('NY');
CREATE TABLE sale_california_repl_factor_1 PARTITION OF sale_repl_factor_1 FOR VALUES IN ('CA');
SELECT create_distributed_table('sale_repl_factor_1', 'state_code');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Should print a warning that it will not be propagated to worker nodes.
CLUSTER sale_repl_factor_1 USING sale_repl_factor_1_pk;
WARNING: not propagating CLUSTER command for partitioned table to worker nodes
HINT: Provide a child partition table names in order to CLUSTER distributed partitioned tables.
-- verify that we can still cluster the partition tables now since replication factor is 1
CLUSTER sale_newyork_repl_factor_1 USING sale_newyork_repl_factor_1_pkey;
create table reservations ( room_id integer not null, booked_during daterange );
insert into reservations values
-- 1: has a meets and a gap
(1, daterange('2018-07-01', '2018-07-07')),
(1, daterange('2018-07-07', '2018-07-14')),
(1, daterange('2018-07-20', '2018-07-22')),
-- 2: just a single row
(2, daterange('2018-07-01', '2018-07-03')),
-- 3: one null range
(3, NULL),
-- 4: two null ranges
(4, NULL),
(4, NULL),
-- 5: a null range and a non-null range
(5, NULL),
(5, daterange('2018-07-01', '2018-07-03')),
-- 6: has overlap
(6, daterange('2018-07-01', '2018-07-07')),
(6, daterange('2018-07-05', '2018-07-10')),
-- 7: two ranges that meet: no gap or overlap
(7, daterange('2018-07-01', '2018-07-07')),
(7, daterange('2018-07-07', '2018-07-14')),
-- 8: an empty range
(8, 'empty'::daterange);
SELECT create_distributed_table('reservations', 'room_id');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg15.reservations$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- should be fine to pushdown range_agg
SELECT room_id, range_agg(booked_during ORDER BY booked_during)
FROM reservations
GROUP BY room_id
ORDER BY room_id;
room_id | range_agg
---------------------------------------------------------------------
1 | {[07-01-2018,07-14-2018),[07-20-2018,07-22-2018)}
2 | {[07-01-2018,07-03-2018)}
3 |
4 |
5 | {[07-01-2018,07-03-2018)}
6 | {[07-01-2018,07-10-2018)}
7 | {[07-01-2018,07-14-2018)}
8 | {}
(8 rows)
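-- note: range_agg merges overlapping or adjacent ranges into a single
-- multirange, e.g. room 7's two meeting ranges collapse into one range above,
-- while room 1 keeps its gap and ends up with two ranges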
-- should be fine to apply range_agg on the coordinator
SELECT room_id + 1, range_agg(booked_during ORDER BY booked_during)
FROM reservations
GROUP BY room_id + 1
ORDER BY room_id + 1;
?column? | range_agg
---------------------------------------------------------------------
2 | {[07-01-2018,07-14-2018),[07-20-2018,07-22-2018)}
3 | {[07-01-2018,07-03-2018)}
4 |
5 |
6 | {[07-01-2018,07-03-2018)}
7 | {[07-01-2018,07-10-2018)}
8 | {[07-01-2018,07-14-2018)}
9 | {}
(8 rows)
-- min() and max() for xid8
create table xid8_t1 (x xid8, y int);
insert into xid8_t1 values ('0', 1), ('010', 2), ('42', 3), ('0xffffffffffffffff', 4), ('-1', 5);
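-- note (assumption about xid8 input): the literals are parsed with automatic
-- base detection, so '010' is octal 8 while '0xffffffffffffffff' and '-1' both
-- map to the maximum 64-bit xid, as the aggregates below show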
SELECT create_distributed_table('xid8_t1', 'x');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
DETAIL: The local data in the table is no longer visible, but is still on disk.
HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$pg15.xid8_t1$$)
create_distributed_table
---------------------------------------------------------------------
(1 row)
select min(x), max(x) from xid8_t1 ORDER BY 1,2;
min | max
---------------------------------------------------------------------
0 | 18446744073709551615
(1 row)
select min(x), max(x) from xid8_t1 GROUP BY x ORDER BY 1,2;
min | max
---------------------------------------------------------------------
0 | 0
8 | 8
42 | 42
18446744073709551615 | 18446744073709551615
(4 rows)
select min(x), max(x) from xid8_t1 GROUP BY y ORDER BY 1,2;
min | max
---------------------------------------------------------------------
0 | 0
8 | 8
42 | 42
18446744073709551615 | 18446744073709551615
18446744073709551615 | 18446744073709551615
(5 rows)
--
-- PG15 introduces security invoker views
-- Citus supports these views because permissions in the shards
-- are already checked for the view invoker
--
-- create a distributed table and populate it
CREATE TABLE events (tenant_id int, event_id int, descr text);
SELECT create_distributed_table('events','tenant_id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO events VALUES (1, 1, 'push');
INSERT INTO events VALUES (2, 2, 'push');
-- create a security invoker view with underlying distributed table
-- the view will be distributed with security_invoker option as well
CREATE VIEW sec_invoker_view WITH (security_invoker=true) AS SELECT * FROM events;
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class
WHERE relname = 'sec_invoker_view' AND relnamespace = 'pg15'::regnamespace;
relname | reloptions
---------------------------------------------------------------------
sec_invoker_view | {security_invoker=true}
(1 row)
\c - - - :master_port
SET search_path TO pg15;
-- test altering the security_invoker flag
ALTER VIEW sec_invoker_view SET (security_invoker = false);
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class
WHERE relname = 'sec_invoker_view' AND relnamespace = 'pg15'::regnamespace;
relname | reloptions
---------------------------------------------------------------------
sec_invoker_view | {security_invoker=false}
(1 row)
\c - - - :master_port
SET search_path TO pg15;
ALTER VIEW sec_invoker_view SET (security_invoker = true);
-- create a new user but don't give select permission to events table
-- only give select permission to the view
CREATE ROLE rls_tenant_1 WITH LOGIN;
GRANT USAGE ON SCHEMA pg15 TO rls_tenant_1;
GRANT SELECT ON sec_invoker_view TO rls_tenant_1;
-- this user shouldn't be able to query the view
-- because the view is security invoker
-- which means it will check the invoker's rights
-- against the view's underlying tables
SET ROLE rls_tenant_1;
SELECT * FROM sec_invoker_view ORDER BY event_id;
ERROR: permission denied for table events
RESET ROLE;
-- now grant select on the underlying distributed table
-- and try again
-- now it should work!
GRANT SELECT ON TABLE events TO rls_tenant_1;
SET ROLE rls_tenant_1;
SELECT * FROM sec_invoker_view ORDER BY event_id;
tenant_id | event_id | descr
---------------------------------------------------------------------
1 | 1 | push
2 | 2 | push
(2 rows)
RESET ROLE;
-- Enable row level security
ALTER TABLE events ENABLE ROW LEVEL SECURITY;
-- Create policy for tenants to read access their own rows
CREATE POLICY user_mod ON events
FOR SELECT TO rls_tenant_1
USING (current_user = 'rls_tenant_' || tenant_id::text);
-- all rows should be visible because we are querying with
-- the table owner user now
SELECT * FROM sec_invoker_view ORDER BY event_id;
tenant_id | event_id | descr
---------------------------------------------------------------------
1 | 1 | push
2 | 2 | push
(2 rows)
-- Switch user that has been granted rights,
-- should be able to see rows that the policy allows
SET ROLE rls_tenant_1;
SELECT * FROM sec_invoker_view ORDER BY event_id;
tenant_id | event_id | descr
---------------------------------------------------------------------
1 | 1 | push
(1 row)
RESET ROLE;
-- ordinary view on top of security invoker view permissions
-- ordinary means security definer view
-- The PG expected behavior is that this doesn't change anything!!!
-- Can't escape security invoker views by defining a security definer view on top of it!
CREATE VIEW sec_definer_view AS SELECT * FROM sec_invoker_view ORDER BY event_id;
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class
WHERE relname = 'sec_definer_view' AND relnamespace = 'pg15'::regnamespace;
relname | reloptions
---------------------------------------------------------------------
sec_definer_view |
(1 row)
\c - - - :master_port
SET search_path TO pg15;
CREATE ROLE rls_tenant_2 WITH LOGIN;
GRANT USAGE ON SCHEMA pg15 TO rls_tenant_2;
GRANT SELECT ON sec_definer_view TO rls_tenant_2;
-- it doesn't matter that the parent view is security definer
-- still the security invoker view will check the invoker's permissions
-- and will not allow rls_tenant_2 to query the view
SET ROLE rls_tenant_2;
SELECT * FROM sec_definer_view ORDER BY event_id;
ERROR: permission denied for table events
RESET ROLE;
-- grant select rights to rls_tenant_2
GRANT SELECT ON TABLE events TO rls_tenant_2;
-- we still have row level security so rls_tenant_2
-- will be able to query but won't be able to see anything
SET ROLE rls_tenant_2;
SELECT * FROM sec_definer_view ORDER BY event_id;
tenant_id | event_id | descr
---------------------------------------------------------------------
(0 rows)
RESET ROLE;
-- give some rights to rls_tenant_2
CREATE POLICY user_mod_1 ON events
FOR SELECT TO rls_tenant_2
USING (current_user = 'rls_tenant_' || tenant_id::text);
-- Row level security will be applied as well! We are safe!
SET ROLE rls_tenant_2;
SELECT * FROM sec_definer_view ORDER BY event_id;
tenant_id | event_id | descr
---------------------------------------------------------------------
2 | 2 | push
(1 row)
RESET ROLE;
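-- to summarize: rls_tenant_2 only sees its own row because both the invoker's
-- privileges on events and the row level security policies are applied through
-- the security_invoker view, even when it is wrapped by an ordinary view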
-- no need to test updatable views because they are currently not
-- supported in Citus when the view query contains citus tables
UPDATE sec_invoker_view SET event_id = 5;
ERROR: cannot modify views when the query contains citus tables
--
-- Disallow ON DELETE/UPDATE SET DEFAULT actions on columns that
-- default to sequences.
-- Adding a special test here since in PG15 we can
-- specify a column list for foreign key ON DELETE SET actions
-- Relevant PG commit:
-- d6f96ed94e73052f99a2e545ed17a8b2fdc1fb8a
--
CREATE TABLE set_on_default_test_referenced(
col_1 int, col_2 int, col_3 int, col_4 int,
unique (col_1, col_3)
);
SELECT create_reference_table('set_on_default_test_referenced');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3)
REFERENCES set_on_default_test_referenced(col_1, col_3)
ON DELETE SET DEFAULT (col_1)
ON UPDATE SET DEFAULT
);
-- should error since col_3 defaults to a sequence
SELECT create_reference_table('set_on_default_test_referencing');
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
DROP TABLE set_on_default_test_referencing;
CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3)
REFERENCES set_on_default_test_referenced(col_1, col_3)
ON DELETE SET DEFAULT (col_1)
);
-- should not error since this doesn't set any sequence based columns to default
SELECT create_reference_table('set_on_default_test_referencing');
create_reference_table
---------------------------------------------------------------------
(1 row)
INSERT INTO set_on_default_test_referenced (col_1, col_3) VALUES (1, 1);
INSERT INTO set_on_default_test_referencing (col_1, col_3) VALUES (1, 1);
DELETE FROM set_on_default_test_referenced;
SELECT * FROM set_on_default_test_referencing ORDER BY 1,2;
col_1 | col_2 | col_3 | col_4
---------------------------------------------------------------------
| | 1 |
(1 row)
DROP TABLE set_on_default_test_referencing;
SET client_min_messages to ERROR;
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
-- this works around bug #6476: the CREATE TABLE below will
-- self-deadlock on PG15 if it also replicates reference
-- tables to the coordinator.
SELECT replicate_reference_tables(shard_transfer_mode := 'block_writes');
replicate_reference_tables
---------------------------------------------------------------------
(1 row)
-- should error since col_3 defaults to a sequence
CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3)
REFERENCES set_on_default_test_referenced(col_1, col_3)
ON DELETE SET DEFAULT (col_3)
);
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
--
-- PG15 has suppressed some casts on constants when querying foreign tables
-- For example, we can use text to represent a type that's an enum on the remote side
-- A comparison on such a column will get shipped as "var = 'foo'::text"
-- But there's no enum = text operator on the remote side
-- If we leave off the explicit cast, the comparison will work
-- Test we behave in the same way with a Citus foreign table
-- Reminder: foreign tables cannot be distributed or reference tables; they can only be Citus local tables
-- Relevant PG commit:
-- f8abb0f5e114d8c309239f0faa277b97f696d829
--
\set VERBOSITY terse
SET citus.next_shard_id TO 960200;
SET citus.enable_local_execution TO ON;
-- add the foreign table to metadata with the guc
SET citus.use_citus_managed_tables TO ON;
CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
CREATE TABLE foreign_table_test (c0 integer NOT NULL, c1 user_enum);
INSERT INTO foreign_table_test VALUES (1, 'foo');
CREATE EXTENSION postgres_fdw;
CREATE SERVER foreign_server
FOREIGN DATA WRAPPER postgres_fdw
OPTIONS (host 'localhost', port :'master_port', dbname 'regression');
CREATE USER MAPPING FOR CURRENT_USER
SERVER foreign_server
OPTIONS (user 'postgres');
CREATE FOREIGN TABLE foreign_table (
c0 integer NOT NULL,
c1 text
)
SERVER foreign_server
OPTIONS (schema_name 'pg15', table_name 'foreign_table_test');
-- check that the foreign table is a citus local table
SELECT partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid = 'foreign_table'::regclass ORDER BY logicalrelid;
partmethod | repmodel
---------------------------------------------------------------------
n | s
(1 row)
-- same tests as in the relevant PG commit
-- Check that Remote SQL in the EXPLAIN doesn't contain casting
EXPLAIN (VERBOSE, COSTS OFF)
SELECT * FROM foreign_table WHERE c1 = 'foo' LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.c0, remote_scan.c1
Task Count: 1
Tasks Shown: All
-> Task
Query: SELECT c0, c1 FROM pg15.foreign_table_960201 foreign_table WHERE (c1 OPERATOR(pg_catalog.=) 'foo'::text) LIMIT 1
Node: host=localhost port=xxxxx dbname=regression
-> Foreign Scan on pg15.foreign_table_960201 foreign_table
Output: c0, c1
Remote SQL: SELECT c0, c1 FROM pg15.foreign_table_test WHERE ((c1 = 'foo')) LIMIT 1::bigint
(10 rows)
SELECT * FROM foreign_table WHERE c1 = 'foo' LIMIT 1;
c0 | c1
---------------------------------------------------------------------
1 | foo
(1 row)
-- Check that Remote SQL in the EXPLAIN doesn't contain casting
EXPLAIN (VERBOSE, COSTS OFF)
SELECT * FROM foreign_table WHERE 'foo' = c1 LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
Output: remote_scan.c0, remote_scan.c1
Task Count: 1
Tasks Shown: All
-> Task
Query: SELECT c0, c1 FROM pg15.foreign_table_960201 foreign_table WHERE ('foo'::text OPERATOR(pg_catalog.=) c1) LIMIT 1
Node: host=localhost port=xxxxx dbname=regression
-> Foreign Scan on pg15.foreign_table_960201 foreign_table
Output: c0, c1
Remote SQL: SELECT c0, c1 FROM pg15.foreign_table_test WHERE (('foo' = c1)) LIMIT 1::bigint
(10 rows)
SELECT * FROM foreign_table WHERE 'foo' = c1 LIMIT 1;
c0 | c1
---------------------------------------------------------------------
1 | foo
(1 row)
-- we declared c1 to be text locally, but it's still the same type on
-- the remote which will balk if we try to do anything incompatible
-- with that remote type
SELECT * FROM foreign_table WHERE c1 LIKE 'foo' LIMIT 1; -- ERROR
ERROR: operator does not exist: pg15.user_enum ~~ unknown
SELECT * FROM foreign_table WHERE c1::text LIKE 'foo' LIMIT 1; -- ERROR; cast not pushed down
ERROR: operator does not exist: pg15.user_enum ~~ unknown
-- Clean up foreign table test
RESET citus.use_citus_managed_tables;
SELECT undistribute_table('foreign_table');
NOTICE: creating a new table for pg15.foreign_table
NOTICE: dropping the old pg15.foreign_table
NOTICE: renaming the new table to pg15.foreign_table
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT undistribute_table('foreign_table_test');
NOTICE: creating a new table for pg15.foreign_table_test
NOTICE: moving the data of pg15.foreign_table_test
NOTICE: dropping the old pg15.foreign_table_test
NOTICE: renaming the new table to pg15.foreign_table_test
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
DROP SERVER foreign_server CASCADE;
NOTICE: drop cascades to 2 other objects
-- PG15 now supports specifying oid on CREATE DATABASE
-- verify that we print meaningful notice messages.
CREATE DATABASE db_with_oid OID 987654;
NOTICE: Citus partially supports CREATE DATABASE for distributed databases
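-- for illustration (not executed here), the requested oid could be verified with
--   SELECT oid, datname FROM pg_database WHERE datname = 'db_with_oid';
-- which should report oid 987654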
DROP DATABASE db_with_oid;
-- SET ACCESS METHOD
-- Create a heap2 table am handler with heapam handler
CREATE ACCESS METHOD heap2 TYPE TABLE HANDLER heap_tableam_handler;
SELECT run_command_on_workers($$CREATE ACCESS METHOD heap2 TYPE TABLE HANDLER heap_tableam_handler$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ACCESS METHOD")
(localhost,57638,t,"CREATE ACCESS METHOD")
(2 rows)
CREATE TABLE mx_ddl_table2 (
key int primary key,
value int
);
SELECT create_distributed_table('mx_ddl_table2', 'key', 'hash', shard_count=> 4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
ALTER TABLE mx_ddl_table2 SET ACCESS METHOD heap2;
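-- for illustration (not executed here), the access method in use could be
-- checked with
--   SELECT amname FROM pg_am am JOIN pg_class c ON c.relam = am.oid
--   WHERE c.relname = 'mx_ddl_table2';
-- which should return heap2 after the ALTER above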
DROP TABLE mx_ddl_table2;
DROP ACCESS METHOD heap2;
SELECT run_command_on_workers($$DROP ACCESS METHOD heap2$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ACCESS METHOD")
(localhost,57638,t,"DROP ACCESS METHOD")
(2 rows)
-- Clean up
\set VERBOSITY terse
SET client_min_messages TO ERROR;
DROP SCHEMA pg15 CASCADE;
DROP ROLE rls_tenant_1;
DROP ROLE rls_tenant_2;