--
-- MULTI_COPY
--

-- set file paths
\set customer1datafile :abs_srcdir '/data/customer.1.data'
\set customer2datafile :abs_srcdir '/data/customer.2.data'
\set customer3datafile :abs_srcdir '/data/customer.3.data'
\set lineitem1datafile :abs_srcdir '/data/lineitem.1.data'

ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000;

-- Create a new hash-partitioned table into which to COPY
CREATE TABLE customer_copy_hash (
        c_custkey integer,
        c_name varchar(25) not null,
        c_address varchar(40),
        c_nationkey integer,
        c_phone char(15),
        c_acctbal decimal(15,2),
        c_mktsegment char(10),
        c_comment varchar(117),
        primary key (c_custkey));
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('customer_copy_hash', 'c_custkey', shard_count:=64);

-- Test empty copy
COPY customer_copy_hash FROM STDIN;
\.

-- Test syntax error
COPY customer_copy_hash (c_custkey,c_name) FROM STDIN;
1,customer1
2,customer2,
notinteger,customernot
\.

-- Test invalid option
COPY customer_copy_hash (c_custkey,c_name) FROM STDIN (append_to_shard 1);

-- Confirm that no data was copied
SELECT count(*) FROM customer_copy_hash;

-- Test primary key violation
COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv');
1,customer1
2,customer2
2,customer2
\.

-- Confirm that no data was copied
SELECT count(*) FROM customer_copy_hash;

-- Test headers option
COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', HEADER true, FORCE_NULL (c_custkey));
# header
1,customer1
2,customer2
3,customer3
\.

-- Confirm that only first row was skipped
SELECT count(*) FROM customer_copy_hash;

-- Test force_not_null option
COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NOT_NULL (c_address));
"4","customer4",""
\.

-- Confirm that value is not null
SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 4;

-- Test force_null option
COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NULL (c_address));
"5","customer5",""
\.

-- Confirm that value is null
SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 5;

-- Test null violation
COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv');
6,customer6
7,customer7
8,
\.

-- Confirm that no data was copied
SELECT count(*) FROM customer_copy_hash;

-- Test server-side copy from program
COPY customer_copy_hash (c_custkey, c_name) FROM PROGRAM 'echo 9 customer9' WITH (DELIMITER ' ');

-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash WHERE c_custkey = 9;

-- Test server-side copy from file
\set client_side_copy_command '\\copy customer_copy_hash FROM ' :'customer2datafile' ' WITH (DELIMITER '''|''');'
:client_side_copy_command

-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;

-- Test client-side copy from file
-- \copy does not support variable interpolation. Hence we store and execute
-- the query in two steps for interpolation to kick in.
-- See https://stackoverflow.com/a/67642094/4576416 for details.
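-- As a rough illustration of that pattern (hypothetical table and variable
-- names, not executed by this test): the command text is first assembled into
-- a psql variable so that :'...' interpolation happens at \set time, and the
-- variable is then run as a command on its own line:
--   \set copy_cmd '\\copy my_table FROM ' :'my_datafile' ' WITH (FORMAT csv);'
--   :copy_cmd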
\set client_side_copy_command '\\copy customer_copy_hash FROM ' :'customer3datafile' ' WITH (DELIMITER '''|''');'
:client_side_copy_command

-- Confirm that data was copied
SELECT count(*) FROM customer_copy_hash;

-- Update shard statistics for hash-partitioned table
SELECT citus_update_shard_statistics(560000);

SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560000;

-- Create a new hash-partitioned table with default now() function
CREATE TABLE customer_with_default(
        c_custkey integer,
        c_name varchar(25) not null,
        c_time timestamp default now());
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('customer_with_default', 'c_custkey', shard_count:=64);

-- Test with default values for now() function
COPY customer_with_default (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv');
1,customer1
2,customer2
\.

-- Confirm that data was copied with now() function
SELECT count(*) FROM customer_with_default where c_time IS NOT NULL;

-- Add columns to the table and perform a COPY
ALTER TABLE customer_copy_hash ADD COLUMN extra1 INT DEFAULT 0;
ALTER TABLE customer_copy_hash ADD COLUMN extra2 INT DEFAULT 0;

COPY customer_copy_hash (c_custkey, c_name, extra1, extra2) FROM STDIN CSV;
10,customer10,1,5
\.

SELECT * FROM customer_copy_hash WHERE extra1 = 1;

-- Test dropping an intermediate column
ALTER TABLE customer_copy_hash DROP COLUMN extra1;

COPY customer_copy_hash (c_custkey, c_name, extra2) FROM STDIN CSV;
11,customer11,5
\.

SELECT * FROM customer_copy_hash WHERE c_custkey = 11;

-- Test dropping the last column
ALTER TABLE customer_copy_hash DROP COLUMN extra2;

COPY customer_copy_hash (c_custkey, c_name) FROM STDIN CSV;
12,customer12
\.

SELECT * FROM customer_copy_hash WHERE c_custkey = 12;

-- Create a new range-partitioned table into which to COPY
CREATE TABLE customer_copy_range (
        c_custkey integer,
        c_name varchar(25),
        c_address varchar(40),
        c_nationkey integer,
        c_phone char(15),
        c_acctbal decimal(15,2),
        c_mktsegment char(10),
        c_comment varchar(117),
        primary key (c_custkey));
SELECT create_distributed_table('customer_copy_range', 'c_custkey', 'range');

-- Test COPY into empty range-partitioned table
\set client_side_copy_command '\\copy customer_copy_range FROM ' :'customer1datafile' ' WITH (DELIMITER '''|''');'
:client_side_copy_command

SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id \gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id;

SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id \gset
UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000 WHERE shardid = :new_shard_id;

-- Test copy into range-partitioned table
\set client_side_copy_command '\\copy customer_copy_range FROM ' :'customer1datafile' ' WITH (DELIMITER '''|''');'
:client_side_copy_command

-- Check whether data went into the right shard (maybe)
SELECT min(c_custkey), max(c_custkey), avg(c_custkey), count(*)
FROM customer_copy_range WHERE c_custkey <= 500;

-- Check whether data was copied
SELECT count(*) FROM customer_copy_range;

SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id;

-- Update shard statistics for range-partitioned shard
SELECT citus_update_shard_statistics(:new_shard_id);

SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id;

-- Create a new append-partitioned table into which to COPY
CREATE TABLE customer_copy_append (
        c_custkey integer,
        c_name varchar(25) not null,
        c_address varchar(40),
        c_nationkey integer,
        c_phone char(15),
        c_acctbal decimal(15,2),
        c_mktsegment char(10),
        c_comment varchar(117));
SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append');
SET citus.shard_replication_factor TO 2;

-- Test syntax error
BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
1,customer1
2,customer2
notinteger,customernot
\.
END;

-- Test that no shard is created for failing copy
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass;

-- Test empty copy
BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
COPY customer_copy_append FROM STDIN WITH (append_to_shard :shardid);
\.
END;

-- Test that a shard is created
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass;

-- Test proper copy
BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
1,customer1
2,customer2
\.
END;

-- Check whether data was copied properly
SELECT * FROM customer_copy_append;

-- Manipulate and check shard statistics for the append-partitioned table shard
UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560132;

SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132;

-- Update shard statistics for append-partitioned shard
SELECT citus_update_shard_statistics(560132);

SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132;

-- Create lineitem table
CREATE TABLE lineitem_copy_append (
        l_orderkey bigint not null,
        l_partkey integer not null,
        l_suppkey integer not null,
        l_linenumber integer not null,
        l_quantity decimal(15, 2) not null,
        l_extendedprice decimal(15, 2) not null,
        l_discount decimal(15, 2) not null,
        l_tax decimal(15, 2) not null,
        l_returnflag char(1) not null,
        l_linestatus char(1) not null,
        l_shipdate date not null,
        l_commitdate date not null,
        l_receiptdate date not null,
        l_shipinstruct char(25) not null,
        l_shipmode char(10) not null,
        l_comment varchar(44) not null);
SELECT create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append');

BEGIN;
SELECT master_create_empty_shard('lineitem_copy_append') AS shardid \gset
\set client_side_copy_command '\\copy lineitem_copy_append FROM ' :'lineitem1datafile' ' with (delimiter '''|''', append_to_shard ' :shardid ');'
:client_side_copy_command
END;

SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass;

-- trigger some errors on the append_to_shard option
\set client_side_copy_command '\\copy lineitem_copy_append FROM ' :'lineitem1datafile' ' with (delimiter '''|''', append_to_shard 1);'
:client_side_copy_command
\set client_side_copy_command '\\copy lineitem_copy_append FROM ' :'lineitem1datafile' ' with (delimiter '''|''', append_to_shard 560000);'
:client_side_copy_command

-- Test schema support on append partitioned tables
CREATE SCHEMA append;
CREATE TABLE append.customer_copy (
        c_custkey integer,
        c_name varchar(25) not null,
        c_address varchar(40),
        c_nationkey integer,
        c_phone char(15),
        c_acctbal decimal(15,2),
        c_mktsegment char(10),
        c_comment varchar(117));
SELECT create_distributed_table('append.customer_copy', 'c_custkey', 'append');

SELECT master_create_empty_shard('append.customer_copy') AS shardid1 \gset
SELECT master_create_empty_shard('append.customer_copy') AS shardid2 \gset
-- Test copy from the master node
\set client_side_copy_command '\\copy append.customer_copy FROM ' :'customer1datafile' ' with (delimiter '''|''', append_to_shard ' :shardid1 ');'
:client_side_copy_command
\set client_side_copy_command '\\copy append.customer_copy FROM ' :'customer2datafile' ' with (delimiter '''|''', append_to_shard ' :shardid2 ');'
:client_side_copy_command

-- Test the content of the table
SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy;

-- Test with a table name that contains a special character
CREATE TABLE "customer_with_special_\\_character"(
        c_custkey integer,
        c_name varchar(25) not null);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash', shard_count := 4);

COPY "customer_with_special_\\_character" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv');
1,customer1
2,customer2
\.

-- Confirm that data was copied
SELECT count(*) FROM "customer_with_special_\\_character";

-- Test with a table name that starts with a number
CREATE TABLE "1_customer"(
        c_custkey integer,
        c_name varchar(25) not null);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('"1_customer"', 'c_custkey', 'hash', shard_count := 4);

COPY "1_customer" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv');
1,customer1
2,customer2
\.

-- Confirm that data was copied
SELECT count(*) FROM "1_customer";

-- Test COPY with types having different Oid at master and workers
CREATE TYPE number_pack AS (
        number1 integer,
        number2 integer
);

CREATE TYPE super_number_pack AS (
        packed_number1 number_pack,
        packed_number2 number_pack
);

-- Test array of user-defined type with hash distribution
CREATE TABLE packed_numbers_hash (
        id integer,
        packed_numbers number_pack[]
);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('packed_numbers_hash', 'id', 'hash', shard_count := 4);

COPY (SELECT 1, ARRAY[ROW(42, 42), ROW(42, 42)]) TO :'temp_dir''copy_test_array_of_composite';
COPY packed_numbers_hash FROM :'temp_dir''copy_test_array_of_composite';

-- Verify data is actually copied
SELECT * FROM packed_numbers_hash;

-- Test composite type containing an element with different Oid with hash distribution
CREATE TABLE super_packed_numbers_hash (
        id integer,
        super_packed_number super_number_pack
);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('super_packed_numbers_hash', 'id', 'hash', shard_count := 4);

COPY (SELECT 1, ROW(ROW(42, 42), ROW(42, 42))) TO :'temp_dir''copy_test_composite_of_composite';
COPY super_packed_numbers_hash FROM :'temp_dir''copy_test_composite_of_composite';

-- Verify data is actually copied
SELECT * FROM super_packed_numbers_hash;

-- Test array of user-defined type with append distribution
CREATE TABLE packed_numbers_append (
        id integer,
        packed_numbers number_pack[]
);
SELECT create_distributed_table('packed_numbers_append', 'id', 'append');
SELECT master_create_empty_shard('packed_numbers_append') AS shardid \gset

COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite' WITH (append_to_shard :shardid);

-- Verify data is actually copied
SELECT * FROM packed_numbers_append;

-- Test composite type containing an element with different Oid with append distribution
CREATE TABLE super_packed_numbers_append (
        id integer,
        super_packed_number super_number_pack
);
SELECT create_distributed_table('super_packed_numbers_append', 'id', 'append');
SELECT master_create_empty_shard('super_packed_numbers_append') AS shardid \gset
COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite' WITH (append_to_shard :shardid);

-- Verify data is actually copied
SELECT * FROM super_packed_numbers_append;

-- Test copy on append for composite type partition column
CREATE TABLE composite_partition_column_table(
        id integer,
        composite_column number_pack
);
SELECT create_distributed_table('composite_partition_column_table', 'composite_column', 'append');
SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset

COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
1,"(1,1)"
2,"(2,2)"
\.

-- Test that copy on append-distributed tables does not create shards on removed workers
SET citus.shard_replication_factor TO 2;
CREATE TABLE numbers_append (a int, b int);
SELECT create_distributed_table('numbers_append', 'a', 'append');

-- no shards are created yet
SELECT shardid, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset

COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
1,1
2,2
\.
COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
4,6
\.

-- verify there are shards at both workers
SELECT shardid, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

-- disable the first node
SET client_min_messages TO ERROR;
\set VERBOSITY terse
SELECT master_disable_node('localhost', :worker_1_port);
SELECT public.wait_until_metadata_sync(30000);
RESET client_min_messages;
\set VERBOSITY default

-- set replication factor to 1 so that copy will
-- succeed without replication count error
SET citus.shard_replication_factor TO 1;

-- add two new shards and verify they are created at the other node
SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset

COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
5,7
6,8
\.
COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
7,9
8,10
\.

SELECT shardid, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

-- add the node back
SET client_min_messages TO ERROR;
SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
RESET client_min_messages;
RESET citus.shard_replication_factor;

-- add two new shards and verify they are created at both workers
SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset

COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
9,11
10,12
\.
COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
11,13
12,14
\.
SELECT shardid, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

DROP TABLE numbers_append;

-- Test copy failures against connection failures
-- create and switch to test user
CREATE USER test_user;
\c - test_user

SET citus.shard_count to 4;
CREATE TABLE numbers_hash (a int, b int);
SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');

COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');
1,1
2,2
3,3
4,4
5,5
6,6
7,7
8,8
\.

-- verify each placement is active
SELECT shardid, shardstate, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_hash'::regclass order by shardid, nodeport;

-- create a reference table
CREATE TABLE numbers_reference(a int, b int);
SELECT create_reference_table('numbers_reference');

COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');
1,1
2,2
\.

-- create another hash distributed table
CREATE TABLE numbers_hash_other(a int, b int);
SELECT create_distributed_table('numbers_hash_other', 'a', colocate_with:='numbers_hash');

SELECT shardid, shardstate, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_hash_other'::regclass order by shardid, nodeport;

-- manually corrupt pg_dist_shard_placement such that both copies of one shard are
-- placed on port worker_1_port+10. This is to test the behavior when no replica of a
-- shard is accessible. The whole copy operation is supposed to fail and roll back.
-- We use the session_replication_role trick to disable the triggers on the
-- pg_dist_shard_placement view.
\c - :default_user
SET session_replication_role = replica;
UPDATE pg_dist_shard_placement SET nodeport = :worker_1_port+10 WHERE shardid = 560176 and nodeport = :worker_2_port;
SET session_replication_role = DEFAULT;

-- disable test_user on the first worker
\c - :default_user - :worker_1_port
SET citus.enable_metadata_sync TO off;
ALTER USER test_user WITH nologin;

\c - test_user - :master_port

-- reissue copy, and it should fail
COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');

-- verify that shards on none of the workers are marked invalid
SELECT shardid, shardstate, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_hash'::regclass order by shardid, nodeport;

-- try to insert into a reference table; copy should fail
COPY numbers_reference FROM STDIN WITH (FORMAT 'csv');

-- verify shards for reference table are still valid
SELECT shardid, shardstate, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_reference'::regclass order by placementid;

-- try to insert into numbers_hash_other. copy should fail and roll back
-- since it cannot insert into either copy of a shard. shards are expected to
-- stay valid since the operation is rolled back.
COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv');

-- verify shards for numbers_hash_other are still valid
-- since copy has failed altogether
SELECT shardid, shardstate, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_hash_other'::regclass order by shardid, nodeport;

-- re-enable test_user on the first worker
\c - :default_user - :worker_1_port
SET citus.enable_metadata_sync TO off;
ALTER USER test_user WITH login;

\c - test_user - :master_port
DROP TABLE numbers_hash;
DROP TABLE numbers_hash_other;
DROP TABLE numbers_reference;

\c - :default_user

-- test copy failure inside the node
-- it will be done by changing the definition of a shard table
SET citus.shard_count to 4;
SET citus.next_shard_id TO 560170;
CREATE TABLE numbers_hash(a int, b int);
SELECT create_distributed_table('numbers_hash', 'a', colocate_with:='none');

\c - - - :worker_1_port
ALTER TABLE numbers_hash_560170 DROP COLUMN b;
\c - - - :master_port

-- the operation will fail to modify a shard and roll back
COPY numbers_hash FROM STDIN WITH (FORMAT 'csv');
1,1
2,2
3,3
4,4
5,5
6,6
7,7
8,8
\.

-- verify no row is inserted
SELECT count(a) FROM numbers_hash;

-- verify shard is still marked as valid
SELECT shardid, shardstate, nodename, nodeport
        FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
        WHERE logicalrelid = 'numbers_hash'::regclass order by shardid, nodeport;

DROP TABLE numbers_hash;
DROP USER test_user;

-- Test copy with built-in type without binary output function
CREATE TABLE test_binaryless_builtin (
        col1 aclitem NOT NULL,
        col2 character varying(255) NOT NULL
);
SELECT create_reference_table('test_binaryless_builtin');

\COPY test_binaryless_builtin FROM STDIN WITH (format CSV)
user postgres=r/postgres, test
\.

SELECT * FROM test_binaryless_builtin;

DROP TABLE test_binaryless_builtin;

-- Test drop table with copy in the same transaction
BEGIN;
CREATE TABLE tt1(id int);
SELECT create_distributed_table('tt1','id');
\copy tt1 from STDIN;
1
2
\.
DROP TABLE tt1;
END;

-- Test dropping a column in front of the partition column
CREATE TABLE drop_copy_test_table (col1 int, col2 int, col3 int, col4 int);
SELECT create_distributed_table('drop_copy_test_table','col3');

ALTER TABLE drop_copy_test_table drop column col1;
COPY drop_copy_test_table (col2,col3,col4) from STDIN with CSV;
,1,
,2,
\.
SELECT * FROM drop_copy_test_table WHERE col3 = 1;

ALTER TABLE drop_copy_test_table drop column col4;
COPY drop_copy_test_table (col2,col3) from STDIN with CSV;
,1
,2
\.
SELECT * FROM drop_copy_test_table WHERE col3 = 1;

DROP TABLE drop_copy_test_table;

-- There should be no "tt1" shard on the worker nodes
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'tt1%';
\c - - - :master_port

-- copy >8MB to a single worker to trigger a flush in PutRemoteCopyData
BEGIN;
CREATE UNLOGGED TABLE trigger_flush AS
SELECT 1 AS a, s AS b, s AS c, s AS d, s AS e, s AS f, s AS g, s AS h FROM generate_series(1,150000) s;
SELECT create_distributed_table('trigger_flush','a');
ABORT;

-- trigger switch-over when using a single connection per worker
BEGIN;
SET citus.shard_count TO 3;
SET citus.multi_shard_modify_mode TO 'sequential';
CREATE UNLOGGED TABLE trigger_switchover(a int, b int, c int, d int, e int, f int, g int, h int);
SELECT create_distributed_table('trigger_switchover','a');
INSERT INTO trigger_switchover
SELECT s AS a, s AS b, s AS c, s AS d, s AS e, s AS f, s AS g, s AS h FROM generate_series(1,250000) s;
ABORT;

-- copy into a table with a JSONB column
CREATE TABLE copy_jsonb (key text, value jsonb, extra jsonb default '["default"]'::jsonb);
SELECT create_distributed_table('copy_jsonb', 'key', colocate_with => 'none');

-- JSONB from text should work
\COPY copy_jsonb (key, value) FROM STDIN
blue	{"r":0,"g":0,"b":255}
green	{"r":0,"g":255,"b":0}
\.
SELECT * FROM copy_jsonb ORDER BY key;

-- JSONB from binary should work
COPY copy_jsonb TO :'temp_dir''copy_jsonb.pgcopy' WITH (format binary);
COPY copy_jsonb FROM :'temp_dir''copy_jsonb.pgcopy' WITH (format binary);
SELECT * FROM copy_jsonb ORDER BY key;

-- JSONB parsing error without validation: no line number
\COPY copy_jsonb (key, value) FROM STDIN
red	{"r":255,"g":0,"b":0
\.

TRUNCATE copy_jsonb;

-- JSONB should work when there is a complex column. Complex columns force
-- a non-binary copy format between master and workers.
CREATE TYPE complex_for_copy_test AS (r double precision, i double precision);
CREATE TABLE copy_jsonb_with_complex(key int, value_1 jsonb, value_2 complex_for_copy_test);
SELECT create_distributed_table('copy_jsonb_with_complex', 'key');

\COPY copy_jsonb_with_complex FROM STDIN
1	{"f1": 3803, "f2": "v1", "f3": {"f4": 1}}	(10,9)
2	{"f5": null, "f6": true}	(20,19)
\.
SELECT * FROM copy_jsonb_with_complex ORDER BY key;

DROP TABLE copy_jsonb_with_complex;
DROP TYPE complex_for_copy_test;

SET citus.skip_jsonb_validation_in_copy TO off;

-- JSONB from text should work
\COPY copy_jsonb (key, value) FROM STDIN
blue	{"r":0,"g":0,"b":255}
green	{"r":0,"g":255,"b":0}
\.
SELECT * FROM copy_jsonb ORDER BY key;

-- JSONB from binary should work
COPY copy_jsonb TO :'temp_dir''copy_jsonb.pgcopy' WITH (format binary);
COPY copy_jsonb FROM :'temp_dir''copy_jsonb.pgcopy' WITH (format binary);
SELECT * FROM copy_jsonb ORDER BY key;

-- JSONB parsing error with validation: should see line number
\COPY copy_jsonb (key, value) FROM STDIN
red	{"r":255,"g":0,"b":0
\.

DROP TABLE copy_jsonb;