From 12c626884546a59fd66cb16f029ef12a859261d4 Mon Sep 17 00:00:00 2001 From: naisila Date: Sun, 7 Dec 2025 22:06:04 +0300 Subject: [PATCH] Cleanup from dropping PG14 --- .../create_drop_database_propagation_pg15.out | 10 - ...reate_drop_database_propagation_pg15_0.out | 9 - .../regress/expected/merge_unsupported.out | 7 - .../regress/expected/merge_unsupported_0.out | 7 - .../regress/expected/merge_unsupported_1.out | 17 - .../expected/multi_metadata_sync_0.out | 2264 --------------- .../multi_mx_insert_select_repartition_0.out | 167 -- src/test/regress/expected/publication_0.out | 276 -- src/test/regress/expected/single_node_0.out | 2582 ----------------- .../create_drop_database_propagation_pg15.sql | 11 - src/test/regress/sql/merge_unsupported.sql | 9 - 11 files changed, 5359 deletions(-) delete mode 100644 src/test/regress/expected/create_drop_database_propagation_pg15_0.out delete mode 100644 src/test/regress/expected/merge_unsupported_1.out delete mode 100644 src/test/regress/expected/multi_metadata_sync_0.out delete mode 100644 src/test/regress/expected/multi_mx_insert_select_repartition_0.out delete mode 100644 src/test/regress/expected/publication_0.out delete mode 100644 src/test/regress/expected/single_node_0.out diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out index 9a501558a..e1646dac6 100644 --- a/src/test/regress/expected/create_drop_database_propagation_pg15.out +++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out @@ -1,13 +1,3 @@ --- --- PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- create/drop database for pg >= 15 set citus.enable_create_database_propagation=on; CREATE DATABASE mydatabase diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15_0.out b/src/test/regress/expected/create_drop_database_propagation_pg15_0.out deleted file mode 100644 index b1ed9cc5b..000000000 --- a/src/test/regress/expected/create_drop_database_propagation_pg15_0.out +++ /dev/null @@ -1,9 +0,0 @@ --- --- PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q diff --git a/src/test/regress/expected/merge_unsupported.out b/src/test/regress/expected/merge_unsupported.out index 62f51a679..af465d3a9 100644 --- a/src/test/regress/expected/merge_unsupported.out +++ b/src/test/regress/expected/merge_unsupported.out @@ -2,7 +2,6 @@ SHOW server_version \gset SELECT CASE WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' ELSE 'Unsupported version' END AS version_category; version_category @@ -10,12 +9,6 @@ SELECT CASE 17+ (1 row) -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local) -- diff --git a/src/test/regress/expected/merge_unsupported_0.out b/src/test/regress/expected/merge_unsupported_0.out index b788c1670..e322a0f1e 100644 --- a/src/test/regress/expected/merge_unsupported_0.out +++ b/src/test/regress/expected/merge_unsupported_0.out @@ -2,7 +2,6 @@ SHOW server_version 
\gset SELECT CASE WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' ELSE 'Unsupported version' END AS version_category; version_category @@ -10,12 +9,6 @@ SELECT CASE 15_16 (1 row) -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local) -- diff --git a/src/test/regress/expected/merge_unsupported_1.out b/src/test/regress/expected/merge_unsupported_1.out deleted file mode 100644 index 187c5d630..000000000 --- a/src/test/regress/expected/merge_unsupported_1.out +++ /dev/null @@ -1,17 +0,0 @@ -SHOW server_version \gset -SELECT CASE - WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' - WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' - ELSE 'Unsupported version' - END AS version_category; - version_category ---------------------------------------------------------------------- - 14 -(1 row) - -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out deleted file mode 100644 index 5ff926ff6..000000000 --- a/src/test/regress/expected/multi_metadata_sync_0.out +++ /dev/null @@ -1,2264 +0,0 @@ --- --- MULTI_METADATA_SYNC --- --- this test has different output for PG14 compared to PG15 --- In PG15, public schema is owned by pg_database_owner role --- Relevant PG commit: b073c3ccd06e4cb845e121387a43faa8c68a7b62 -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15; - server_version_ge_15 ---------------------------------------------------------------------- - f -(1 row) - --- Tests for metadata snapshot functions, metadata syncing functions and propagation of --- metadata changes to MX tables. 
--- Turn metadata sync off at first -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000; -ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 2; -SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id -\gset -ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; -SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset -SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset --- Create the necessary test utility function -SET citus.enable_metadata_sync TO OFF; -CREATE FUNCTION activate_node_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -RESET citus.enable_metadata_sync; -COMMENT ON FUNCTION activate_node_snapshot() - IS 'commands to activate node snapshot'; --- Show that none of the existing tables are qualified to be MX tables -SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted ---------------------------------------------------------------------- -(0 rows) - --- Since password_encryption default has been changed to sha from md5 with PG14 --- we are updating it manually just for consistent test results between PG versions. -ALTER SYSTEM SET password_encryption TO md5; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(0.1); - pg_sleep ---------------------------------------------------------------------- - -(1 row) - -SET client_min_messages TO ERROR; -ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword'; -RESET client_min_messages; --- Show that, with no MX tables, activate node snapshot contains only the delete commands, --- pg_dist_node entries, pg_dist_object entries and roles. 
-SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; -(33 rows) - --- Create a test table with constraints and SERIAL and default from user defined sequence -CREATE SEQUENCE user_defined_seq; -CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, 
col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq')); -set citus.shard_count to 8; -set citus.shard_replication_factor to 1; -SELECT create_distributed_table('mx_test_table', 'col_1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -reset citus.shard_count; --- Set the replication model of the test table to streaming replication so that it is --- considered as an MX table -UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; --- add a single shard table and verify the creation commands are included in the activate node snapshot -CREATE TABLE single_shard_tbl(a int); -SELECT create_distributed_table('single_shard_tbl', null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO single_shard_tbl VALUES (1); -reset citus.shard_replication_factor; --- Show that the created MX table is and its sequences are included in the activate node snapshot -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE public.mx_test_table OWNER TO postgres - ALTER TABLE public.single_shard_tbl OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - CREATE TABLE public.single_shard_tbl (a integer) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS public.mx_test_table CASCADE - DROP TABLE IF EXISTS public.single_shard_tbl CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT citus_internal.add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency('public.single_shard_tbl'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT 
pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('public.mx_test_table') - SELECT worker_create_truncate_trigger('public.single_shard_tbl') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(61 rows) - --- Drop single shard table -DROP TABLE single_shard_tbl; --- Show that CREATE 
INDEX commands are included in the activate node snapshot -CREATE INDEX mx_index ON mx_test_table(col_2); -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE public.mx_test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS public.mx_test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('public.mx_test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' 
- UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', 
'-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(52 rows) - --- Show that schema changes are included in the activate node snapshot -CREATE SCHEMA mx_testing_schema; -ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) - CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 
MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, 
colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(54 rows) - --- Show that append distributed tables are not included in the activate node snapshot -CREATE TABLE non_mx_test_table (col_1 int, col_2 text); -SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) - CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) 
NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH 
distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', 
'536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(54 rows) - --- Show that range distributed tables are not included in the activate node snapshot -UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) - CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') 
- SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) 
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(54 rows) - --- Test start_metadata_sync_to_node and citus_activate_node UDFs --- Ensure that hasmetadata=false for all nodes except for the coordinator node -SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- Show that metadata can not be synced on secondary node -SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset -SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); - master_add_node ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT start_metadata_sync_to_node('localhost', 8888); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; - hasmetadata ---------------------------------------------------------------------- - f -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', 8888); -NOTICE: (localhost,8888) is a secondary node: to clear the metadata, you should clear metadata from the primary node - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; - hasmetadata ---------------------------------------------------------------------- - f -(1 row) - --- Add a node to another cluster to make sure it's also synced -SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); - 
master_add_secondary_node ---------------------------------------------------------------------- - 6 -(1 row) - -\c - - - :master_port --- Run start_metadata_sync_to_node and citus_activate_node and check that it marked hasmetadata for that worker -SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; - nodeid | hasmetadata ---------------------------------------------------------------------- - 2 | t -(1 row) - --- Check that the metadata has been copied to the worker -\c - - - :worker_1_port -SELECT * FROM pg_dist_local_group; - groupid ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------------------------------------------------------------------- - 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f - 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t - 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t - 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t - 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t -(5 rows) - -SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted ---------------------------------------------------------------------- - mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f -(1 row) - -SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- - mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 - mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 - mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 - mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 - mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 - mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 - mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 - mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 -(8 rows) - -SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------------------------------------------------------------- - 1310000 | 1 | 0 | localhost | 57637 | 100000 - 1310001 | 1 | 0 | localhost | 57638 | 100001 - 1310002 | 1 | 0 | localhost | 57637 | 100002 - 1310003 | 1 | 0 | localhost | 57638 | 100003 - 1310004 | 1 | 0 | localhost | 57637 | 100004 - 1310005 | 1 | 0 | localhost | 57638 | 100005 - 1310006 | 1 | 0 | localhost | 57637 | 100006 - 1310007 | 1 | 0 | localhost | 57638 | 100007 -(8 rows) - -SELECT "Column", "Type", "Modifiers" 
FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col_1 | integer | - col_2 | text | not null - col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) - col_4 | bigint | default nextval('user_defined_seq'::regclass) -(4 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col_1 | integer | col_1 -(1 row) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_testing_schema.mx_index'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col_2 | text | col_2 -(1 row) - --- Check that pg_dist_colocation is synced -SELECT * FROM pg_dist_colocation ORDER BY colocationid; - colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation ---------------------------------------------------------------------- - 2 | 8 | 1 | 23 | 0 -(1 row) - --- Make sure that truncate trigger has been set for the MX table on worker -SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- Make sure that citus_activate_node considers foreign key constraints -\c - - - :master_port --- Since we're superuser, we can set the replication model to 'streaming' to --- create some MX tables -SET citus.shard_replication_factor TO 1; -CREATE SCHEMA mx_testing_schema_2; -CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3)); -CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text, - FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3)); -SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- Check that foreign key metadata exists on the worker -\c - - - :worker_1_port -SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass; - Constraint | Definition ---------------------------------------------------------------------- - fk_test_2_col1_col2_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3) -(1 row) - -\c - - - :master_port -DROP TABLE mx_testing_schema_2.fk_test_2; -DROP TABLE mx_testing_schema.fk_test_1; -RESET citus.shard_replication_factor; --- Check that repeated calls to citus_activate_node have no side effects -\c - - - :master_port -SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); - ?column?
---------------------------------------------------------------------- - 1 -(1 row) - -\c - - - :worker_1_port -SELECT * FROM pg_dist_local_group; - groupid ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT * FROM pg_dist_node ORDER BY nodeid; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------------------------------------------------------------------- - 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f - 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t - 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t - 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t - 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t -(5 rows) - -SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted ---------------------------------------------------------------------- - mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varreturningtype 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f -(1 row) - -SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- - mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 - mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 - mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 - mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 - mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 - mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 - mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 - mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 -(8 rows) - -SELECT * FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%') ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------------------------------------------------------------- - 1310000 | 1 | 0 | localhost | 57637 | 100000 - 1310001 | 1 | 0 | localhost | 57638 | 100001 - 1310002 | 1 | 0 | localhost | 57637 | 100002 - 1310003 | 1 | 0 | localhost | 57638 | 100003 - 1310004 | 1 | 0 | localhost | 57637 | 100004 - 1310005 | 1 | 0 | localhost | 57638 | 100005 - 1310006 | 1 | 0 | localhost | 57637 | 100006 - 1310007 | 1 | 0 | localhost | 57638 | 100007 -(8 rows) - -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col_1 | integer | - col_2 | text | not null - col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) - col_4 | bigint | default nextval('user_defined_seq'::regclass) -(4 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_testing_schema.mx_test_table_col_1_key'::regclass; - Column | Type | Definition 
---------------------------------------------------------------------- - col_1 | integer | col_1 -(1 row) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_testing_schema.mx_index'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col_2 | text | col_2 -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- Make sure that citus_activate_node can be called inside a transaction and rollbacked -\c - - - :master_port -BEGIN; -SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -ROLLBACK; -SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; - hasmetadata ---------------------------------------------------------------------- - f -(1 row) - --- Check that the distributed table can be queried from the worker -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -CREATE TABLE mx_query_test (a int, b text, c int); -SELECT create_distributed_table('mx_query_test', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE single_shard_tbl(a int); -SELECT create_distributed_table('single_shard_tbl', null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO single_shard_tbl VALUES (1); -SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass; - repmodel ---------------------------------------------------------------------- - s -(1 row) - -INSERT INTO mx_query_test VALUES (1, 'one', 1); -INSERT INTO mx_query_test VALUES (2, 'two', 4); -INSERT INTO mx_query_test VALUES (3, 'three', 9); -INSERT INTO mx_query_test VALUES (4, 'four', 16); -INSERT INTO mx_query_test VALUES (5, 'five', 24); -\c - - - :worker_1_port -SELECT * FROM mx_query_test ORDER BY a; - a | b | c ---------------------------------------------------------------------- - 1 | one | 1 - 2 | two | 4 - 3 | three | 9 - 4 | four | 16 - 5 | five | 24 -(5 rows) - -INSERT INTO mx_query_test VALUES (6, 'six', 36); -UPDATE mx_query_test SET c = 25 WHERE a = 5; -SELECT * FROM single_shard_tbl ORDER BY a; - a ---------------------------------------------------------------------- - 1 -(1 row) - -INSERT INTO single_shard_tbl VALUES (2); -\c - - - :master_port -SELECT * FROM mx_query_test ORDER BY a; - a | b | c ---------------------------------------------------------------------- - 1 | one | 1 - 2 | two | 4 - 3 | three | 9 - 4 | four | 16 - 5 | five | 25 - 6 | six | 36 -(6 rows) - -SELECT * FROM single_shard_tbl ORDER BY a; - a ---------------------------------------------------------------------- - 1 - 2 -(2 rows) - -\c - - - :master_port -DROP TABLE mx_query_test; -DROP TABLE single_shard_tbl; --- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false -\c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata 
---------------------------------------------------------------------- - t -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata ---------------------------------------------------------------------- - f -(1 row) - --- Test DDL propagation in MX tables -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SET citus.shard_count = 5; -CREATE SCHEMA mx_test_schema_1; -CREATE SCHEMA mx_test_schema_2; --- Create MX tables -SET citus.shard_replication_factor TO 1; -CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text); -CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1); -CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text); -CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2); -ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1); -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col1 | integer | - col2 | text | -(2 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_1.mx_table_1_col1_key'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_1.mx_index_1'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col1 | integer | - col2 | text | -(2 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_2.mx_index_2'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col2 | text | col2 -(1 row) - -SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass; - Constraint | Definition ---------------------------------------------------------------------- - mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) -(1 row) - -SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- Check that created tables are marked as streaming replicated tables -SELECT - logicalrelid, repmodel -FROM - pg_dist_partition -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text; - logicalrelid | repmodel 
---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | s - mx_test_schema_2.mx_table_2 | s -(2 rows) - --- See the shards and placements of the mx tables -SELECT - logicalrelid, shardid, nodename, nodeport -FROM - pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text, shardid; - logicalrelid | shardid | nodename | nodeport ---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310025 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310026 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310030 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310031 | localhost | 57637 -(10 rows) - --- Check that metadata of MX tables exist on the metadata worker -\c - - - :worker_1_port --- Check that tables are created -\dt mx_test_schema_?.mx_table_? - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - mx_test_schema_1 | mx_table_1 | table | postgres - mx_test_schema_2 | mx_table_2 | table | postgres -(2 rows) - --- Check that table metadata are created -SELECT - logicalrelid, repmodel -FROM - pg_dist_partition -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text; - logicalrelid | repmodel ---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | s - mx_test_schema_2.mx_table_2 | s -(2 rows) - --- Check that shard and placement data are created -SELECT - logicalrelid, shardid, nodename, nodeport -FROM - pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE - logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass - OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass -ORDER BY - logicalrelid::text, shardid; - logicalrelid | shardid | nodename | nodeport ---------------------------------------------------------------------- - mx_test_schema_1.mx_table_1 | 1310022 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310023 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310024 | localhost | 57637 - mx_test_schema_1.mx_table_1 | 1310025 | localhost | 57638 - mx_test_schema_1.mx_table_1 | 1310026 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310027 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310028 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310029 | localhost | 57637 - mx_test_schema_2.mx_table_2 | 1310030 | localhost | 57638 - mx_test_schema_2.mx_table_2 | 1310031 | localhost | 57637 -(10 rows) - --- Check that metadata of MX tables don't exist on the non-metadata worker -\c - - - :worker_2_port -\d mx_test_schema_1.mx_table_1 -\d mx_test_schema_2.mx_table_2 -SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_test_schema%'; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted ---------------------------------------------------------------------- -(0 rows) - -SELECT * FROM pg_dist_shard WHERE 
logicalrelid::text LIKE 'mx_test_schema%'; - logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- -(0 rows) - -SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; - shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------------------------------------------------------------- -(0 rows) - --- Check that CREATE INDEX statement is propagated -\c - - - :master_port -SET client_min_messages TO 'ERROR'; -CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1); -ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1); -\c - - - :worker_1_port -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_2.mx_index_3'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_2.mx_table_2_col1_key'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col1 | integer | col1 -(1 row) - --- Check that DROP INDEX statement is propagated -\c - - - :master_port -DROP INDEX mx_test_schema_2.mx_index_3; -\c - - - :worker_1_port -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_test_schema_2.mx_index_3'::regclass; -ERROR: relation "mx_test_schema_2.mx_index_3" does not exist --- Check that ALTER TABLE statements are propagated -\c - - - :master_port -ALTER TABLE mx_test_schema_1.mx_table_1 ADD COLUMN col3 NUMERIC; -ALTER TABLE mx_test_schema_1.mx_table_1 ALTER COLUMN col3 SET DATA TYPE INT; -ALTER TABLE - mx_test_schema_1.mx_table_1 -ADD CONSTRAINT - mx_fk_constraint -FOREIGN KEY - (col1) -REFERENCES - mx_test_schema_2.mx_table_2(col1); -\c - - - :worker_1_port -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col1 | integer | - col2 | text | - col3 | integer | -(3 rows) - -SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Constraint | Definition ---------------------------------------------------------------------- - mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) -(1 row) - --- Check that foreign key constraint with NOT VALID works as well -\c - - - :master_port -ALTER TABLE mx_test_schema_1.mx_table_1 DROP CONSTRAINT mx_fk_constraint; -ALTER TABLE - mx_test_schema_1.mx_table_1 -ADD CONSTRAINT - mx_fk_constraint_2 -FOREIGN KEY - (col1) -REFERENCES - mx_test_schema_2.mx_table_2(col1) -NOT VALID; -\c - - - :worker_1_port -SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; - Constraint | Definition ---------------------------------------------------------------------- - mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) -(1 row) - --- Check that update_distributed_table_colocation call propagates the changes to the workers -\c - - - :master_port -SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset -ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000; -SET citus.shard_count TO 7; -SET citus.shard_replication_factor TO 1; -CREATE TABLE mx_colocation_test_1 (a int); 
-SELECT create_distributed_table('mx_colocation_test_1', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE mx_colocation_test_2 (a int); -SELECT create_distributed_table('mx_colocation_test_2', 'a', colocate_with:='none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- Reset the colocation IDs of the test tables -DELETE FROM - pg_dist_colocation -WHERE EXISTS ( - SELECT 1 - FROM pg_dist_partition - WHERE - colocationid = pg_dist_partition.colocationid - AND pg_dist_partition.logicalrelid = 'mx_colocation_test_1'::regclass); --- Check the colocation IDs of the created tables -SELECT - logicalrelid, colocationid -FROM - pg_dist_partition -WHERE - logicalrelid = 'mx_colocation_test_1'::regclass - OR logicalrelid = 'mx_colocation_test_2'::regclass -ORDER BY logicalrelid::text; - logicalrelid | colocationid ---------------------------------------------------------------------- - mx_colocation_test_1 | 10000 - mx_colocation_test_2 | 10001 -(2 rows) - --- Update colocation and see the changes on the master and the worker -SELECT update_distributed_table_colocation('mx_colocation_test_1', colocate_with => 'mx_colocation_test_2'); - update_distributed_table_colocation ---------------------------------------------------------------------- - -(1 row) - -SELECT - logicalrelid, colocationid -FROM - pg_dist_partition -WHERE - logicalrelid = 'mx_colocation_test_1'::regclass - OR logicalrelid = 'mx_colocation_test_2'::regclass -ORDER BY - logicalrelid::text; - logicalrelid | colocationid ---------------------------------------------------------------------- - mx_colocation_test_1 | 10001 - mx_colocation_test_2 | 10001 -(2 rows) - -\c - - - :worker_1_port -SELECT - logicalrelid, colocationid -FROM - pg_dist_partition -WHERE - logicalrelid = 'mx_colocation_test_1'::regclass - OR logicalrelid = 'mx_colocation_test_2'::regclass -ORDER BY - logicalrelid::text; - logicalrelid | colocationid ---------------------------------------------------------------------- - mx_colocation_test_1 | 10001 - mx_colocation_test_2 | 10001 -(2 rows) - -\c - - - :master_port --- Check that DROP TABLE on MX tables works -DROP TABLE mx_colocation_test_1; -DROP TABLE mx_colocation_test_2; -\d mx_colocation_test_1 -\d mx_colocation_test_2 -\c - - - :worker_1_port -\d mx_colocation_test_1 -\d mx_colocation_test_2 --- Check that dropped MX table can be recreated again -\c - - - :master_port -SET citus.shard_count TO 7; -SET citus.shard_replication_factor TO 1; -CREATE TABLE mx_temp_drop_test (a int); -SELECT create_distributed_table('mx_temp_drop_test', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; - logicalrelid | repmodel ---------------------------------------------------------------------- - mx_temp_drop_test | s -(1 row) - -DROP TABLE mx_temp_drop_test; -CREATE TABLE mx_temp_drop_test (a int); -SELECT create_distributed_table('mx_temp_drop_test', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; - logicalrelid | repmodel ---------------------------------------------------------------------- - mx_temp_drop_test | s -(1 row) - -DROP TABLE 
mx_temp_drop_test; --- Check that MX tables can be created with SERIAL columns -\c - - - :master_port -SET citus.shard_count TO 3; -SET citus.shard_replication_factor TO 1; -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - --- sync table with serial column after create_distributed_table -CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); -SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -DROP TABLE mx_table_with_small_sequence; --- Show that create_distributed_table works with a serial column -CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); -SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO mx_table_with_small_sequence VALUES (0); -\c - - - :worker_1_port --- Insert doesn't work because the defaults are of type int and smallint -INSERT INTO mx_table_with_small_sequence VALUES (1), (3); -ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint -\c - - - :master_port -SET citus.shard_replication_factor TO 1; --- Create an MX table with (BIGSERIAL) sequences -CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); -SELECT create_distributed_table('mx_table_with_sequence', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO mx_table_with_sequence VALUES (0); -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - a | integer | - b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) - c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) -(3 rows) - -\ds mx_table_with_sequence_b_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_table_with_sequence_b_seq | sequence | postgres -(1 row) - -\ds mx_table_with_sequence_c_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_table_with_sequence_c_seq | sequence | postgres -(1 row) - --- Check that the sequences are created on the metadata worker as well -\c - - - :worker_1_port -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - a | integer | - b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) - c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) -(3 
rows) - -\ds mx_table_with_sequence_b_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_table_with_sequence_b_seq | sequence | postgres -(1 row) - -\ds mx_table_with_sequence_c_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_table_with_sequence_c_seq | sequence | postgres -(1 row) - --- Insert works because the defaults are of type bigint -INSERT INTO mx_table_with_sequence VALUES (1), (3); --- check that pg_depend records exist on the worker -SELECT refobjsubid FROM pg_depend -WHERE objid = 'mx_table_with_sequence_b_seq'::regclass AND refobjid = 'mx_table_with_sequence'::regclass; - refobjsubid ---------------------------------------------------------------------- - 2 -(1 row) - -SELECT refobjsubid FROM pg_depend -WHERE objid = 'mx_table_with_sequence_c_seq'::regclass AND refobjid = 'mx_table_with_sequence'::regclass; - refobjsubid ---------------------------------------------------------------------- - 3 -(1 row) - --- Check that the sequences on the worker have their own space -SELECT nextval('mx_table_with_sequence_b_seq'); - nextval ---------------------------------------------------------------------- - 281474976710659 -(1 row) - -SELECT nextval('mx_table_with_sequence_c_seq'); - nextval ---------------------------------------------------------------------- - 281474976710659 -(1 row) - --- Check that adding a new metadata node sets the sequence space correctly -\c - - - :master_port -SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -\c - - - :worker_2_port -SELECT groupid FROM pg_dist_local_group; - groupid ---------------------------------------------------------------------- - 2 -(1 row) - -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - a | integer | - b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) - c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) -(3 rows) - -\ds mx_table_with_sequence_b_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_table_with_sequence_b_seq | sequence | postgres -(1 row) - -\ds mx_table_with_sequence_c_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_table_with_sequence_c_seq | sequence | postgres -(1 row) - -SELECT nextval('mx_table_with_sequence_b_seq'); - nextval ---------------------------------------------------------------------- - 562949953421313 -(1 row) - -SELECT nextval('mx_table_with_sequence_c_seq'); - nextval ---------------------------------------------------------------------- - 562949953421313 -(1 row) - --- Insert doesn't work because the defaults are of type int and smallint -INSERT INTO mx_table_with_small_sequence VALUES (2), (4); -ERROR: nextval(sequence) calls in worker nodes are not supported for column defaults of type int or smallint --- Insert works because the defaults are of type bigint -INSERT INTO mx_table_with_sequence VALUES (2), (4); --- Check that dropping the mx table with sequences works as expected -\c - - - :master_port --- check our small 
sequence values -SELECT a, b, c FROM mx_table_with_small_sequence ORDER BY a,b,c; - a | b | c ---------------------------------------------------------------------- - 0 | 1 | 1 -(1 row) - ---check our bigint sequence values -SELECT a, b, c FROM mx_table_with_sequence ORDER BY a,b,c; - a | b | c ---------------------------------------------------------------------- - 0 | 1 | 1 - 1 | 281474976710657 | 281474976710657 - 2 | 562949953421314 | 562949953421314 - 3 | 281474976710658 | 281474976710658 - 4 | 562949953421315 | 562949953421315 -(5 rows) - --- Check that dropping the mx table with sequences works as expected -DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; -\d mx_table_with_sequence -\ds mx_table_with_sequence_b_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- -(0 rows) - -\ds mx_table_with_sequence_c_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- -(0 rows) - --- Check that the sequences are dropped from the workers -\c - - - :worker_1_port -\d mx_table_with_sequence -\ds mx_table_with_sequence_b_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- -(0 rows) - -\ds mx_table_with_sequence_c_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- -(0 rows) - --- Check that the sequences are dropped from the workers -\c - - - :worker_2_port -\ds mx_table_with_sequence_b_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- -(0 rows) - -\ds mx_table_with_sequence_c_seq - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- -(0 rows) - --- Check that MX sequences play well with non-super users -\c - - - :master_port --- Remove a node so that shards and sequences won't be created on table creation. 
Therefore, --- we can test that citus_activate_node can actually create the sequence with proper --- owner -CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement; -CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition; -CREATE TABLE pg_dist_object_temp AS SELECT * FROM pg_catalog.pg_dist_object; -DELETE FROM pg_dist_placement; -DELETE FROM pg_dist_partition; -DELETE FROM pg_catalog.pg_dist_object; -SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset -SELECT master_remove_node('localhost', :worker_2_port); - master_remove_node ---------------------------------------------------------------------- - -(1 row) - - -- the master user needs superuser permissions to change the replication model -CREATE USER mx_user WITH SUPERUSER; -\c - mx_user - :master_port --- Create an mx table as a different user -CREATE TABLE mx_table (a int, b BIGSERIAL); -SET citus.shard_replication_factor TO 1; -SELECT create_distributed_table('mx_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\c - postgres - :master_port -SELECT master_add_node('localhost', :worker_2_port); - master_add_node ---------------------------------------------------------------------- - 7 -(1 row) - -\c - mx_user - :worker_1_port -SELECT nextval('mx_table_b_seq'); - nextval ---------------------------------------------------------------------- - 281474976710657 -(1 row) - -INSERT INTO mx_table (a) VALUES (37); -INSERT INTO mx_table (a) VALUES (38); -SELECT * FROM mx_table ORDER BY a; - a | b ---------------------------------------------------------------------- - 37 | 281474976710658 - 38 | 281474976710659 -(2 rows) - -\c - mx_user - :worker_2_port -SELECT nextval('mx_table_b_seq'); - nextval ---------------------------------------------------------------------- - 1125899906842625 -(1 row) - -INSERT INTO mx_table (a) VALUES (39); -INSERT INTO mx_table (a) VALUES (40); -SELECT * FROM mx_table ORDER BY a; - a | b ---------------------------------------------------------------------- - 37 | 281474976710658 - 38 | 281474976710659 - 39 | 1125899906842626 - 40 | 1125899906842627 -(4 rows) - -\c - mx_user - :master_port -DROP TABLE mx_table; --- put the metadata back into a consistent state -\c - postgres - :master_port -INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp; -INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp; -INSERT INTO pg_catalog.pg_dist_object SELECT * FROM pg_dist_object_temp ON CONFLICT ON CONSTRAINT pg_dist_object_pkey DO NOTHING; -DROP TABLE pg_dist_placement_temp; -DROP TABLE pg_dist_partition_temp; -DROP TABLE pg_dist_object_temp; -UPDATE pg_dist_placement - SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) - WHERE groupid = :old_worker_2_group; -\c - - - :worker_1_port -UPDATE pg_dist_placement - SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) - WHERE groupid = :old_worker_2_group; -\c - - - :worker_2_port -UPDATE pg_dist_placement - SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) - WHERE groupid = :old_worker_2_group; -\c - - - :master_port -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -DROP USER mx_user; --- Check that create_reference_table creates the 
metadata on workers -\c - - - :master_port -CREATE TABLE mx_ref (col_1 int, col_2 text); -SELECT create_reference_table('mx_ref'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - --- make sure that adding/removing nodes doesn't cause --- multiple colocation entries for reference tables -SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; - count ---------------------------------------------------------------------- - 1 -(1 row) - -\dt mx_ref - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_ref | table | postgres -(1 row) - -\c - - - :worker_1_port -\dt mx_ref - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_ref | table | postgres -(1 row) - -SELECT - logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport -FROM - pg_dist_partition - NATURAL JOIN pg_dist_shard - NATURAL JOIN pg_dist_shard_placement -WHERE - logicalrelid = 'mx_ref'::regclass -ORDER BY - nodeport; - logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport ---------------------------------------------------------------------- - mx_ref | n | t | 1310074 | 100074 | localhost | 57636 - mx_ref | n | t | 1310074 | 100075 | localhost | 57637 - mx_ref | n | t | 1310074 | 100076 | localhost | 57638 -(3 rows) - -SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset --- make sure we have the pg_dist_colocation record on the worker -SELECT count(*) FROM pg_dist_colocation WHERE distributioncolumntype = 0; - count ---------------------------------------------------------------------- - 1 -(1 row) - --- Check that DDL commands are propagated to reference tables on workers -\c - - - :master_port -ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0; -CREATE INDEX mx_ref_index ON mx_ref(col_1); -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col_1 | integer | - col_2 | text | - col_3 | numeric | default 0 -(3 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_ref_index'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col_1 | integer | col_1 -(1 row) - -\c - - - :worker_1_port -SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- - col_1 | integer | - col_2 | text | - col_3 | numeric | default 0 -(3 rows) - -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_ref_index'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- - col_1 | integer | col_1 -(1 row) - --- Check that metadata is cleaned successfully upon drop table -\c - - - :master_port -DROP TABLE mx_ref; -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_ref_index'::regclass; -ERROR: relation "mx_ref_index" does not exist -\c - - - :worker_1_port -SELECT "Column", "Type", "Definition" FROM index_attrs WHERE - relid = 'mx_ref_index'::regclass; -ERROR: relation "mx_ref_index" does not exist -SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; - logicalrelid | shardid | 
shardstorage | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- -(0 rows) - -SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid; - shardid | shardstate | shardlength | nodename | nodeport | placementid ---------------------------------------------------------------------- -(0 rows) - --- Check that master_add_node propagates the metadata about new placements of a reference table -\c - - - :master_port -SELECT groupid AS old_worker_2_group - FROM pg_dist_node WHERE nodeport = :worker_2_port \gset -CREATE TABLE tmp_placement AS - SELECT * FROM pg_dist_placement WHERE groupid = :old_worker_2_group; -DELETE FROM pg_dist_placement - WHERE groupid = :old_worker_2_group; -SELECT master_remove_node('localhost', :worker_2_port); -WARNING: could not find any shard placements for shardId 1310001 -WARNING: could not find any shard placements for shardId 1310023 -WARNING: could not find any shard placements for shardId 1310028 - master_remove_node ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE mx_ref (col_1 int, col_2 text); -SELECT create_reference_table('mx_ref'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -SELECT shardid, nodename, nodeport -FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE logicalrelid='mx_ref'::regclass; - shardid | nodename | nodeport ---------------------------------------------------------------------- - 1310075 | localhost | 57636 - 1310075 | localhost | 57637 -(2 rows) - -\c - - - :worker_1_port -SELECT shardid, nodename, nodeport -FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE logicalrelid='mx_ref'::regclass; - shardid | nodename | nodeport ---------------------------------------------------------------------- - 1310075 | localhost | 57636 - 1310075 | localhost | 57637 -(2 rows) - -\c - - - :master_port -SET client_min_messages TO ERROR; -SELECT master_add_node('localhost', :worker_2_port); - master_add_node ---------------------------------------------------------------------- - 8 -(1 row) - -RESET client_min_messages; -SELECT shardid, nodename, nodeport -FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE logicalrelid='mx_ref'::regclass -ORDER BY shardid, nodeport; - shardid | nodename | nodeport ---------------------------------------------------------------------- - 1310075 | localhost | 57636 - 1310075 | localhost | 57637 -(2 rows) - -\c - - - :worker_1_port -SELECT shardid, nodename, nodeport -FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement -WHERE logicalrelid='mx_ref'::regclass -ORDER BY shardid, nodeport; - shardid | nodename | nodeport ---------------------------------------------------------------------- - 1310075 | localhost | 57636 - 1310075 | localhost | 57637 -(2 rows) - --- Get the metadata back into a consistent state -\c - - - :master_port -INSERT INTO pg_dist_placement (SELECT * FROM tmp_placement); -DROP TABLE tmp_placement; -UPDATE pg_dist_placement - SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) - WHERE groupid = :old_worker_2_group; -\c - - - :worker_1_port -UPDATE pg_dist_placement - SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) - WHERE groupid = :old_worker_2_group; --- Confirm that shouldhaveshards is 'true' -\c - - - :master_port -select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards 
---------------------------------------------------------------------- - t -(1 row) - -\c - postgres - :worker_1_port -select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ---------------------------------------------------------------------- - t -(1 row) - --- Check that setting shouldhaveshards to false is correctly transferred to other mx nodes -\c - - - :master_port -SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', false); - master_set_node_property ---------------------------------------------------------------------- - -(1 row) - -select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ---------------------------------------------------------------------- - f -(1 row) - -\c - postgres - :worker_1_port -select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ---------------------------------------------------------------------- - f -(1 row) - --- Check that setting shouldhaveshards to true is correctly transferred to other mx nodes -\c - postgres - :master_port -SELECT * from master_set_node_property('localhost', 8888, 'shouldhaveshards', true); - master_set_node_property ---------------------------------------------------------------------- - -(1 row) - -select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ---------------------------------------------------------------------- - t -(1 row) - -\c - postgres - :worker_1_port -select shouldhaveshards from pg_dist_node where nodeport = 8888; - shouldhaveshards ---------------------------------------------------------------------- - t -(1 row) - -\c - - - :master_port --- --- Check that metadata commands error out if any nodes are out-of-sync --- --- increase metadata_sync intervals to avoid metadata sync while we test -ALTER SYSTEM SET citus.metadata_sync_interval TO 300000; -ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 300000; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SET citus.shard_replication_factor TO 1; -CREATE TABLE dist_table_1(a int); -SELECT create_distributed_table('dist_table_1', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -UPDATE pg_dist_node SET metadatasynced=false WHERE nodeport=:worker_1_port; -SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport=:worker_1_port; - hasmetadata | metadatasynced ---------------------------------------------------------------------- - t | f -(1 row) - -CREATE TABLE dist_table_2(a int); -SELECT create_distributed_table('dist_table_2', 'a'); -ERROR: localhost:xxxxx is a metadata node, but is out of sync -HINT: If the node is up, wait until metadata gets synced to it and try again. -SELECT create_reference_table('dist_table_2'); -ERROR: localhost:xxxxx is a metadata node, but is out of sync -HINT: If the node is up, wait until metadata gets synced to it and try again. -ALTER TABLE dist_table_1 ADD COLUMN b int; -ERROR: localhost:xxxxx is a metadata node, but is out of sync -HINT: If the node is up, wait until metadata gets synced to it and try again. -SELECT citus_disable_node_and_wait('localhost', :worker_1_port); -ERROR: disabling the first worker node in the metadata is not allowed -DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. 
Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations. -HINT: You can force disabling node, SELECT citus_disable_node('localhost', 57637, synchronous:=true); -CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)" -PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM -SELECT citus_disable_node_and_wait('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx -DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table -HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables -CONTEXT: SQL statement "SELECT pg_catalog.citus_disable_node(nodename, nodeport, force)" -PL/pgSQL function citus_disable_node_and_wait(text,integer,boolean) line XX at PERFORM -SELECT master_remove_node('localhost', :worker_1_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx -DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table -HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables -SELECT master_remove_node('localhost', :worker_2_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx -DETAIL: One of the table(s) that prevents the operation complete successfully is mx_testing_schema.mx_test_table -HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables --- master_update_node should succeed -SELECT nodeid AS worker_2_nodeid FROM pg_dist_node WHERE nodeport=:worker_2_port \gset -SELECT master_update_node(:worker_2_nodeid, 'localhost', 4444); - master_update_node ---------------------------------------------------------------------- - -(1 row) - -SELECT master_update_node(:worker_2_nodeid, 'localhost', :worker_2_port); - master_update_node ---------------------------------------------------------------------- - -(1 row) - -ALTER SYSTEM SET citus.metadata_sync_interval TO DEFAULT; -ALTER SYSTEM SET citus.metadata_sync_retry_interval TO DEFAULT; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- make sure that all the nodes have valid metadata before moving forward -SELECT wait_until_metadata_sync(60000); - wait_until_metadata_sync ---------------------------------------------------------------------- - -(1 row) - -SELECT master_add_node('localhost', :worker_2_port); - master_add_node ---------------------------------------------------------------------- - 8 -(1 row) - -CREATE SEQUENCE mx_test_sequence_0; -CREATE SEQUENCE mx_test_sequence_1; --- test create_distributed_table -CREATE TABLE test_table (id int DEFAULT nextval('mx_test_sequence_0')); -SELECT create_distributed_table('test_table', 'id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- shouldn't work since it's partition column -ALTER TABLE test_table ALTER COLUMN id SET DEFAULT nextval('mx_test_sequence_1'); -ERROR: cannot execute ALTER TABLE command involving partition column --- test different plausible commands -ALTER TABLE test_table ADD 
COLUMN id2 int DEFAULT nextval('mx_test_sequence_1'); -ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT; -ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1'); -SELECT unnest(activate_node_snapshot()) order by 1; - unnest ---------------------------------------------------------------------- - ALTER DATABASE regression OWNER TO postgres; - ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres - ALTER SEQUENCE public.mx_test_sequence_0 OWNER TO postgres - ALTER SEQUENCE public.mx_test_sequence_1 OWNER TO postgres - ALTER SEQUENCE public.user_defined_seq OWNER TO postgres - ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint_2 FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) NOT VALID - ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_table_1_col1_key UNIQUE (col1) - ALTER TABLE mx_test_schema_1.mx_table_1 OWNER TO postgres - ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) - ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1) - ALTER TABLE mx_test_schema_2.mx_table_2 OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE public.dist_table_1 OWNER TO postgres - ALTER TABLE public.mx_ref OWNER TO postgres - ALTER TABLE public.test_table OWNER TO postgres - CALL pg_catalog.worker_drop_all_shell_tables(true) - CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) - CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 USING btree (col1) - CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 USING btree (col2) - CREATE SCHEMA IF NOT EXISTS mx_test_schema_1 AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS mx_test_schema_2 AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS mx_testing_schema_2 AUTHORIZATION postgres - CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres - CREATE TABLE mx_test_schema_1.mx_table_1 (col1 integer, col2 text, col3 integer) USING heap - CREATE TABLE mx_test_schema_2.mx_table_2 (col1 integer, col2 text) USING heap - CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) USING heap - CREATE TABLE public.dist_table_1 (a integer) USING heap - CREATE TABLE public.mx_ref (col_1 integer, col_2 text) USING heap - CREATE TABLE public.test_table (id integer DEFAULT worker_nextval('public.mx_test_sequence_0'::regclass), id2 integer DEFAULT worker_nextval('public.mx_test_sequence_1'::regclass)) USING heap - DELETE FROM pg_catalog.pg_dist_colocation - DELETE FROM pg_catalog.pg_dist_object - DELETE FROM pg_catalog.pg_dist_schema - DELETE FROM pg_dist_node - DELETE FROM pg_dist_partition - DELETE FROM pg_dist_placement - DELETE FROM pg_dist_shard - DROP TABLE IF EXISTS mx_test_schema_1.mx_table_1 CASCADE - DROP TABLE IF EXISTS mx_test_schema_2.mx_table_2 CASCADE - DROP TABLE IF EXISTS mx_testing_schema.mx_test_table CASCADE - DROP TABLE IF EXISTS public.dist_table_1 CASCADE - DROP TABLE IF EXISTS public.mx_ref CASCADE - DROP TABLE IF EXISTS public.test_table CASCADE - GRANT CREATE ON SCHEMA public TO PUBLIC; - GRANT CREATE ON SCHEMA public 
TO postgres; - GRANT USAGE ON SCHEMA public TO PUBLIC; - GRANT USAGE ON SCHEMA public TO postgres; - INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (5, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(6, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(8, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE) - RESET ROLE - RESET ROLE - SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal.add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal.add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT citus_internal.add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's') - SELECT citus_internal.add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't') - SELECT citus_internal.add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's') - SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1'); - SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2'); - SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency('public.dist_table_1'); - SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_ref'); - SELECT pg_catalog.worker_drop_sequence_dependency('public.test_table'); - SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_0 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_1 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer') - SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') - SELECT worker_create_or_alter_role('postgres', 'CREATE ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''', 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') - SELECT 
worker_create_truncate_trigger('mx_test_schema_1.mx_table_1') - SELECT worker_create_truncate_trigger('mx_test_schema_2.mx_table_2') - SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SELECT worker_create_truncate_trigger('public.dist_table_1') - SELECT worker_create_truncate_trigger('public.mx_ref') - SELECT worker_create_truncate_trigger('public.test_table') - SET ROLE postgres - SET ROLE postgres - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'off' - SET citus.enable_ddl_propagation TO 'on' - SET citus.enable_ddl_propagation TO 'on' - UPDATE pg_dist_local_group SET groupid = 1 - UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 - UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) 
SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, 
force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", 
'1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(118 rows) - --- shouldn't work since test_table is MX -ALTER TABLE test_table ADD COLUMN id3 bigserial; -ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers --- shouldn't work since the above operations should be the only subcommands -ALTER TABLE test_table ADD COLUMN id4 int DEFAULT 
nextval('mx_test_sequence_1') CHECK (id4 > 0); -ERROR: cannot execute ADD COLUMN .. DEFAULT nextval('..') command with other subcommands/constraints -HINT: You can issue each subcommand separately -ALTER TABLE test_table ADD COLUMN id4 int, ADD COLUMN id5 int DEFAULT nextval('mx_test_sequence_1'); -ERROR: cannot execute ADD COLUMN .. DEFAULT nextval('..') command with other subcommands/constraints -HINT: You can issue each subcommand separately -ALTER TABLE test_table ALTER COLUMN id1 SET DEFAULT nextval('mx_test_sequence_1'), ALTER COLUMN id2 DROP DEFAULT; -ERROR: cannot execute ALTER COLUMN COLUMN .. SET DEFAULT nextval('..') command with other subcommands -HINT: You can issue each subcommand separately -ALTER TABLE test_table ADD COLUMN id4 bigserial CHECK (id4 > 0); -ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers -\c - - - :worker_1_port -\ds - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_test_sequence_0 | sequence | postgres - public | mx_test_sequence_1 | sequence | postgres - public | mx_test_table_col_3_seq | sequence | postgres - public | sequence_rollback | sequence | postgres - public | sequence_rollback(citus_backup_0) | sequence | postgres - public | user_defined_seq | sequence | postgres -(6 rows) - -\c - - - :master_port -CREATE SEQUENCE local_sequence; --- verify that DROP SEQUENCE will propagate the command to workers for --- the distributed sequences mx_test_sequence_0 and mx_test_sequence_1 -DROP SEQUENCE mx_test_sequence_0, mx_test_sequence_1, local_sequence CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to default value for column id2 of table test_table -drop cascades to default value for column id of table test_table -\c - - - :worker_1_port -\ds - List of relations - Schema | Name | Type | Owner ---------------------------------------------------------------------- - public | mx_test_table_col_3_seq | sequence | postgres - public | sequence_rollback | sequence | postgres - public | sequence_rollback(citus_backup_0) | sequence | postgres - public | user_defined_seq | sequence | postgres -(4 rows) - -\c - - - :master_port -DROP TABLE test_table CASCADE; --- Cleanup -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE mx_test_schema_2.mx_table_2 CASCADE; -NOTICE: drop cascades to constraint mx_fk_constraint_2 on table mx_test_schema_1.mx_table_1 -DROP TABLE mx_test_schema_1.mx_table_1 CASCADE; -DROP TABLE mx_testing_schema.mx_test_table; -DROP TABLE mx_ref; -DROP TABLE dist_table_1, dist_table_2; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO off; -- for enterprise -CREATE USER non_super_metadata_user; -SET citus.enable_ddl_propagation TO on; -RESET client_min_messages; -SELECT run_command_on_workers('CREATE USER non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE ROLE") - (localhost,57638,t,"CREATE ROLE") -(2 rows) - -GRANT EXECUTE ON FUNCTION 
start_metadata_sync_to_node(text,int) TO non_super_metadata_user; -GRANT EXECUTE ON FUNCTION stop_metadata_sync_to_node(text,int,bool) TO non_super_metadata_user; -GRANT ALL ON pg_dist_node TO non_super_metadata_user; -GRANT ALL ON pg_dist_local_group TO non_super_metadata_user; -GRANT ALL ON SCHEMA citus TO non_super_metadata_user; -GRANT INSERT ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user; -GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user; -GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user; -GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user; -GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user; -SELECT run_command_on_workers('GRANT ALL ON pg_dist_node TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('GRANT ALL ON pg_dist_local_group TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('GRANT ALL ON SCHEMA citus TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('ALTER SEQUENCE user_defined_seq OWNER TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"ALTER SEQUENCE") - (localhost,57638,t,"ALTER SEQUENCE") -(2 rows) - -SELECT run_command_on_workers('GRANT ALL ON ALL TABLES IN SCHEMA citus TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_testing_schema_2 TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_1 TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_super_metadata_user'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,GRANT) - (localhost,57638,t,GRANT) -(2 rows) - -SET ROLE non_super_metadata_user; --- user must be super user stop/start metadata -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -ERROR: operation is not allowed -HINT: Run the command with a superuser. -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -ERROR: operation is not allowed -HINT: Run the command with a superuser. 
-RESET ROLE; -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -RESET citus.shard_count; -RESET citus.shard_replication_factor; -ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id; -ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id; -ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; -ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; --- Activate them at the end -SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out deleted file mode 100644 index 62271f9a7..000000000 --- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out +++ /dev/null @@ -1,167 +0,0 @@ --- --- MULTI_MX_INSERT_SELECT_REPARTITION --- --- Test behaviour of repartitioned INSERT ... SELECT in MX setup --- --- This test file has an alternative output because of the change in the --- display of SQL-standard function's arguments in INSERT/SELECT in PG15. --- The alternative output can be deleted when we drop support for PG14 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15; - server_version_ge_15 ---------------------------------------------------------------------- - f -(1 row) - -CREATE SCHEMA multi_mx_insert_select_repartition; -SET search_path TO multi_mx_insert_select_repartition; -SET citus.next_shard_id TO 4213581; -SET citus.shard_replication_factor TO 1; -SET citus.shard_count TO 4; -CREATE TABLE source_table(a int, b int); -SELECT create_distributed_table('source_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO source_table SELECT floor(i/4), i*i FROM generate_series(1, 20) i; -SET citus.shard_count TO 3; -CREATE TABLE target_table(a int, b int); -SELECT create_distributed_table('target_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE FUNCTION square(int) RETURNS INT - AS $$ SELECT $1 * $1 $$ - LANGUAGE SQL; -select create_distributed_function('square(int)'); -NOTICE: procedure multi_mx_insert_select_repartition.square is already distributed -DETAIL: Citus distributes procedures with CREATE [PROCEDURE|FUNCTION|AGGREGATE] commands - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - -select public.colocate_proc_with_table('square', 'source_table'::regclass, 0); - colocate_proc_with_table ---------------------------------------------------------------------- - -(1 row) - --- Test along with function delegation --- function delegation only happens for "SELECT f()", and we don't use --- repartitioned INSERT/SELECT when task count is 1, so the 
following --- should go via coordinator -EXPLAIN (costs off) INSERT INTO target_table(a) SELECT square(4); - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus INSERT ... SELECT) - INSERT/SELECT method: pull to coordinator - -> Result -(3 rows) - -INSERT INTO target_table(a) SELECT square(4); -SELECT * FROM target_table; - a | b ---------------------------------------------------------------------- - 16 | -(1 row) - -TRUNCATE target_table; --- --- Test repartitioned INSERT/SELECT from MX worker --- -\c - - - :worker_1_port -SET search_path TO multi_mx_insert_select_repartition; -EXPLAIN (costs off) INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus INSERT ... SELECT) - INSERT/SELECT method: repartition - -> Custom Scan (Citus Adaptive) - Task Count: 4 - Tasks Shown: One of 4 - -> Task - Node: host=localhost port=xxxxx dbname=regression - -> HashAggregate - Group Key: a - -> Seq Scan on source_table_4213581 source_table -(10 rows) - -INSERT INTO target_table SELECT a, max(b) FROM source_table GROUP BY a; -SET citus.log_local_commands to on; --- INSERT .. SELECT via repartitioning with local execution -BEGIN; - select count(*) from source_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 4 -(1 row) - - -- we omit the "SELECT bytes FROM fetch_intermediate_results..." line since it is flaky - SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; - insert into target_table SELECT a*2 FROM source_table RETURNING a; -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a -NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) 
RETURNING citus_table_alias.a - a ---------------------------------------------------------------------- - 0 - 0 - 0 - 2 - 2 - 2 - 2 - 4 - 4 - 4 - 4 - 6 - 6 - 6 - 6 - 8 - 8 - 8 - 8 - 10 -(20 rows) - -ROLLBACK; -BEGIN; - select count(*) from source_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 4 -(1 row) - - insert into target_table SELECT a FROM source_table LIMIT 10; -NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true LIMIT '10'::bigint -NOTICE: executing the copy locally for shard xxxxx -ROLLBACK; -\c - - - :master_port -SET search_path TO multi_mx_insert_select_repartition; -SELECT * FROM target_table ORDER BY a; - a | b ---------------------------------------------------------------------- - 0 | 9 - 1 | 49 - 2 | 121 - 3 | 225 - 4 | 361 - 5 | 400 -(6 rows) - -RESET client_min_messages; -\set VERBOSITY terse -DROP SCHEMA multi_mx_insert_select_repartition CASCADE; -NOTICE: drop cascades to 3 other objects diff --git a/src/test/regress/expected/publication_0.out b/src/test/regress/expected/publication_0.out deleted file mode 100644 index e768a1d41..000000000 --- a/src/test/regress/expected/publication_0.out +++ /dev/null @@ -1,276 +0,0 @@ -CREATE SCHEMA publication; -CREATE SCHEMA "publication-1"; -SET search_path TO publication; -SET citus.shard_replication_factor TO 1; -CREATE OR REPLACE FUNCTION activate_node_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -COMMENT ON FUNCTION activate_node_snapshot() - IS 'commands to activate node snapshot'; -\c - - - :worker_1_port -SET citus.enable_ddl_propagation TO off; -CREATE OR REPLACE FUNCTION activate_node_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -COMMENT ON FUNCTION activate_node_snapshot() - IS 'commands to activate node snapshot'; -\c - - - :worker_2_port -SET citus.enable_ddl_propagation TO off; -CREATE OR REPLACE FUNCTION activate_node_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -COMMENT ON FUNCTION activate_node_snapshot() - IS 'commands to activate node snapshot'; --- create some publications with conflicting names on worker node --- publication will be different from coordinator -CREATE PUBLICATION "pub-all"; --- publication will be same as coordinator -CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish = 'insert, update');; -\c - - - :master_port -SET search_path TO publication; -SET citus.shard_replication_factor TO 1; --- do not create publications on worker 2 initially -SELECT citus_remove_node('localhost', :worker_2_port); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - --- create a non-distributed publication -SET citus.enable_ddl_propagation TO off; -CREATE PUBLICATION pubnotdistributed WITH (publish = 'delete'); -RESET citus.enable_ddl_propagation; -ALTER PUBLICATION pubnotdistributed SET (publish = 'truncate'); --- create regular, distributed publications -CREATE PUBLICATION pubempty; -CREATE PUBLICATION pubinsertonly WITH (publish = 'insert'); -CREATE PUBLICATION "pub-all" FOR ALL TABLES; -CREATE PUBLICATION "pub-all-insertupdateonly" FOR 
ALL TABLES WITH (publish = 'insert, update'); --- add worker 2 with publications -SELECT 1 FROM citus_add_node('localhost', :worker_2_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- Check publications on all the nodes, if we see the same publication name twice then its definition differs --- Note that publications are special in the sense that the coordinator object might differ from --- worker objects due to the presence of regular tables. -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION "pub-all" FOR ALL TABLES WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')'); - SELECT worker_create_or_replace_object('CREATE PUBLICATION "pub-all-insertupdateonly" FOR ALL TABLES WITH (publish_via_partition_root = ''false'', publish = ''insert, update'')'); - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubempty WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')'); - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubinsertonly WITH (publish_via_partition_root = ''false'', publish = ''insert'')'); -(4 rows) - -CREATE TABLE test (x int primary key, y int, "column-1" int, doc xml); -CREATE TABLE "test-pubs" (x int primary key, y int, "column-1" int); -CREATE TABLE "publication-1"."test-pubs" (x int primary key, y int, "column-1" int); --- various operations on a publication with only local tables -CREATE PUBLICATION pubtables_orig FOR TABLE test, "test-pubs", "publication-1"."test-pubs" WITH (publish = 'insert, truncate'); -ALTER PUBLICATION pubtables_orig DROP TABLE test; -ALTER PUBLICATION pubtables_orig ADD TABLE test; --- publication will be empty on worker nodes, since all tables are local -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables_orig WITH (publish_via_partition_root = ''false'', publish = ''insert, truncate'')'); -(1 row) - --- distribute a table and create a tenant schema, creating a mixed publication -SELECT create_distributed_table('test','x', colocate_with := 'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SET citus.enable_schema_based_sharding TO ON; -CREATE SCHEMA citus_schema_1; -CREATE TABLE citus_schema_1.test (x int primary key, y int, "column-1" int, doc xml); -SET citus.enable_schema_based_sharding TO OFF; -ALTER PUBLICATION pubtables_orig ADD TABLE citus_schema_1.test; --- some generic operations -ALTER PUBLICATION pubtables_orig RENAME TO pubtables; -ALTER PUBLICATION pubtables SET (publish = 'insert, update, delete'); -ALTER PUBLICATION pubtables OWNER TO postgres; -ALTER PUBLICATION pubtables SET (publish = 'inert, update, delete'); -ERROR: unrecognized "publish" value: "inert" -ALTER PUBLICATION pubtables ADD TABLE notexist; -ERROR: relation "notexist" does 
not exist --- operations with a distributed table -ALTER PUBLICATION pubtables DROP TABLE test; -ALTER PUBLICATION pubtables ADD TABLE test; -ALTER PUBLICATION pubtables SET TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test; --- operations with a tenant schema table -ALTER PUBLICATION pubtables DROP TABLE citus_schema_1.test; -ALTER PUBLICATION pubtables ADD TABLE citus_schema_1.test; -ALTER PUBLICATION pubtables SET TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test; --- operations with a local table in a mixed publication -ALTER PUBLICATION pubtables DROP TABLE "test-pubs"; -ALTER PUBLICATION pubtables ADD TABLE "test-pubs"; -SELECT create_distributed_table('"test-pubs"', 'x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- test and test-pubs will show up in worker nodes -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables FOR TABLE publication.test, citus_schema_1.test, publication."test-pubs" WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete'')'); -(1 row) - --- operations with a strangely named distributed table in a mixed publication -ALTER PUBLICATION pubtables DROP TABLE "test-pubs"; -ALTER PUBLICATION pubtables ADD TABLE "test-pubs"; --- create a publication with distributed and local tables -DROP PUBLICATION pubtables; -CREATE PUBLICATION pubtables FOR TABLE test, "test-pubs", "publication-1"."test-pubs", citus_schema_1.test; --- change distributed tables -SELECT alter_distributed_table('test', shard_count := 5, cascade_to_colocated := true); -NOTICE: creating a new table for publication.test -NOTICE: moving the data of publication.test -NOTICE: dropping the old publication.test -NOTICE: renaming the new table to publication.test -NOTICE: creating a new table for publication."test-pubs" -NOTICE: moving the data of publication."test-pubs" -NOTICE: dropping the old publication."test-pubs" -NOTICE: renaming the new table to publication."test-pubs" - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT undistribute_table('test'); -NOTICE: creating a new table for publication.test -NOTICE: moving the data of publication.test -NOTICE: dropping the old publication.test -NOTICE: renaming the new table to publication.test - undistribute_table ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_add_local_table_to_metadata('test'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table_concurrently('test', 'x'); - create_distributed_table_concurrently ---------------------------------------------------------------------- - -(1 row) - -SELECT undistribute_table('"test-pubs"'); -NOTICE: creating a new table for publication."test-pubs" -NOTICE: moving the data of publication."test-pubs" -NOTICE: dropping the old publication."test-pubs" -NOTICE: renaming the new table to publication."test-pubs" - undistribute_table ---------------------------------------------------------------------- - -(1 row) - 
-SELECT create_reference_table('"test-pubs"'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - --- publications are unchanged despite various tranformations -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubtables%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubtables FOR TABLE citus_schema_1.test, publication.test, publication."test-pubs" WITH (publish_via_partition_root = ''false'', publish = ''insert, update, delete, truncate'')'); -(1 row) - --- partitioned table -CREATE TABLE testpub_partitioned (a int, b text, c text) PARTITION BY RANGE (a); -CREATE TABLE testpub_partitioned_0 PARTITION OF testpub_partitioned FOR VALUES FROM (1) TO (10); -ALTER TABLE testpub_partitioned_0 ADD PRIMARY KEY (a); -ALTER TABLE testpub_partitioned_0 REPLICA IDENTITY USING INDEX testpub_partitioned_0_pkey; -CREATE TABLE testpub_partitioned_1 PARTITION OF testpub_partitioned FOR VALUES FROM (11) TO (20); -ALTER TABLE testpub_partitioned_1 ADD PRIMARY KEY (a); -ALTER TABLE testpub_partitioned_1 REPLICA IDENTITY USING INDEX testpub_partitioned_1_pkey; -CREATE PUBLICATION pubpartitioned FOR TABLE testpub_partitioned WITH (publish_via_partition_root = 'true'); -SELECT create_distributed_table('testpub_partitioned', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLICATION%' AND c LIKE '%pubpartitioned%' ORDER BY 1) s$$) - ORDER BY c) s; - c ---------------------------------------------------------------------- - SELECT worker_create_or_replace_object('CREATE PUBLICATION pubpartitioned FOR TABLE publication.testpub_partitioned WITH (publish_via_partition_root = ''true'', publish = ''insert, update, delete, truncate'')'); -(1 row) - -DROP PUBLICATION pubpartitioned; -CREATE PUBLICATION pubpartitioned FOR TABLE testpub_partitioned WITH (publish_via_partition_root = 'true'); --- add a partition -ALTER PUBLICATION pubpartitioned ADD TABLE testpub_partitioned_1; -SELECT DISTINCT c FROM ( - SELECT unnest(result::text[]) c - FROM run_command_on_workers($$ - SELECT array_agg(c) FROM (SELECT c FROM unnest(activate_node_snapshot()) c WHERE c LIKE '%CREATE PUBLIATION%' AND c LIKE '%pubpartitioned%' ORDER BY 1) s$$) - ORDER BY c) s; -ERROR: malformed array literal: "" -DETAIL: Array value must start with "{" or dimension information. 
--- make sure we can sync all the publication metadata -SELECT start_metadata_sync_to_all_nodes(); - start_metadata_sync_to_all_nodes ---------------------------------------------------------------------- - t -(1 row) - -DROP PUBLICATION pubempty; -DROP PUBLICATION pubtables; -DROP PUBLICATION pubinsertonly; -DROP PUBLICATION "pub-all-insertupdateonly"; -DROP PUBLICATION "pub-all"; -DROP PUBLICATION pubpartitioned; -DROP PUBLICATION pubnotdistributed; -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -SET client_min_messages TO ERROR; -DROP SCHEMA publication CASCADE; -DROP SCHEMA "publication-1" CASCADE; -DROP SCHEMA citus_schema_1 CASCADE; -SELECT public.wait_for_resource_cleanup(); - wait_for_resource_cleanup ---------------------------------------------------------------------- - -(1 row) - -\q diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out deleted file mode 100644 index a94c02951..000000000 --- a/src/test/regress/expected/single_node_0.out +++ /dev/null @@ -1,2582 +0,0 @@ --- --- SINGLE_NODE --- --- This test file has an alternative output because of the change in the --- display of SQL-standard function's arguments in INSERT/SELECT in PG15. --- The alternative output can be deleted when we drop support for PG14 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15; - server_version_ge_15 ---------------------------------------------------------------------- - f -(1 row) - -CREATE SCHEMA single_node; -SET search_path TO single_node; -SET citus.shard_count TO 4; -SET citus.shard_replication_factor TO 1; -SET citus.next_shard_id TO 90630500; --- Ensure tuple data in explain analyze output is the same on all PG versions -SET citus.enable_binary_protocol = TRUE; --- do not cache any connections for now, will enable it back soon -ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0; --- adding the coordinator as inactive is disallowed -SELECT 1 FROM master_add_inactive_node('localhost', :master_port, groupid => 0); -ERROR: coordinator node cannot be added as inactive node --- before adding a node we are not officially a coordinator -SELECT citus_is_coordinator(); - citus_is_coordinator ---------------------------------------------------------------------- - f -(1 row) - --- idempotently add node to allow this test to run without add_coordinator -SET client_min_messages TO WARNING; -SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - --- after adding a node we are officially a coordinator -SELECT citus_is_coordinator(); - citus_is_coordinator ---------------------------------------------------------------------- - t -(1 row) - --- coordinator cannot be disabled -SELECT 1 FROM citus_disable_node('localhost', :master_port); -ERROR: cannot change "isactive" field of the coordinator node -RESET client_min_messages; -SELECT 1 FROM master_remove_node('localhost', :master_port); - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - -SELECT count(*) FROM pg_dist_node; - count ---------------------------------------------------------------------- - 0 -(1 row) - --- there are no workers now, but we should still be able to create Citus tables --- force local execution when creating the index -ALTER SYSTEM SET citus.local_shared_pool_size TO -1; --- Postmaster might not ack SIGHUP signal sent by pg_reload_conf() immediately, --- so we need to sleep for some amount of time to do our best to ensure that --- postmaster reflects GUC changes. -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(0.1); - pg_sleep ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE failover_to_local (a int); -SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE INDEX CONCURRENTLY ON failover_to_local(a); -WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. -If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, -if applicable, and then re-attempt the original command. -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: Consider using a higher value for max_connections --- reset global GUC changes -ALTER SYSTEM RESET citus.local_shared_pool_size; -ALTER SYSTEM RESET citus.max_cached_conns_per_worker; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -CREATE TABLE single_node_nullkey_c1(a int, b int); -SELECT create_distributed_table('single_node_nullkey_c1', null, colocate_with=>'none', distribution_type=>null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE single_node_nullkey_c2(a int, b int); -SELECT create_distributed_table('single_node_nullkey_c2', null, colocate_with=>'none', distribution_type=>null); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- created on different colocation groups .. -SELECT -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass -) -!= -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_node_nullkey_c2'::regclass -); - ?column? ---------------------------------------------------------------------- - t -(1 row) - --- .. but both are associated to coordinator -SELECT groupid = 0 FROM pg_dist_placement -WHERE shardid = ( - SELECT shardid FROM pg_dist_shard - WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass -); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT groupid = 0 FROM pg_dist_placement -WHERE shardid = ( - SELECT shardid FROM pg_dist_shard - WHERE logicalrelid = 'single_node.single_node_nullkey_c2'::regclass -); - ?column? 
---------------------------------------------------------------------- - t -(1 row) - --- try creating a single-shard table from a shard relation -SELECT shardid AS round_robin_test_c1_shard_id FROM pg_dist_shard WHERE logicalrelid = 'single_node.single_node_nullkey_c1'::regclass \gset -SELECT create_distributed_table('single_node_nullkey_c1_' || :round_robin_test_c1_shard_id , null, colocate_with=>'none', distribution_type=>null); -ERROR: relation "single_node_nullkey_c1_90630532" is a shard relation --- create a tenant schema on single node setup -SET citus.enable_schema_based_sharding TO ON; -CREATE SCHEMA tenant_1; -CREATE TABLE tenant_1.tbl_1 (a int); --- verify that we recorded tenant_1 in pg_dist_schema -SELECT COUNT(*)=1 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'tenant_1'; - ?column? ---------------------------------------------------------------------- - t -(1 row) - --- verify that tenant_1.tbl_1 is recorded in pg_dist_partition, as a single-shard table -SELECT COUNT(*)=1 FROM pg_dist_partition -WHERE logicalrelid = 'tenant_1.tbl_1'::regclass AND - partmethod = 'n' AND repmodel = 's' AND colocationid IS NOT NULL; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -RESET citus.enable_schema_based_sharding; --- Test lazy conversion from Citus local to single-shard tables --- and reference tables, on single node. This means that no shard --- replication should be needed. -CREATE TABLE ref_table_conversion_test ( - a int PRIMARY KEY -); -SELECT citus_add_local_table_to_metadata('ref_table_conversion_test'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- save old shardid and placementid -SELECT get_shard_id_for_distribution_column('single_node.ref_table_conversion_test') AS ref_table_conversion_test_old_shard_id \gset -SELECT placementid AS ref_table_conversion_test_old_coord_placement_id FROM pg_dist_placement WHERE shardid = :ref_table_conversion_test_old_shard_id \gset -SELECT create_reference_table('ref_table_conversion_test'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -SELECT public.verify_pg_dist_partition_for_reference_table('single_node.ref_table_conversion_test'); - verify_pg_dist_partition_for_reference_table ---------------------------------------------------------------------- - t -(1 row) - -SELECT public.verify_shard_placements_for_reference_table('single_node.ref_table_conversion_test', - :ref_table_conversion_test_old_shard_id, - :ref_table_conversion_test_old_coord_placement_id); - verify_shard_placements_for_reference_table ---------------------------------------------------------------------- - t -(1 row) - -CREATE TABLE single_shard_conversion_test_1 ( - int_col_1 int PRIMARY KEY, - text_col_1 text UNIQUE, - int_col_2 int -); -SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_1'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- save old shardid -SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_1') AS single_shard_conversion_test_1_old_shard_id \gset -SELECT create_distributed_table('single_shard_conversion_test_1', null, colocate_with=>'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT 
public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_1'); - verify_pg_dist_partition_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - -SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_1', :single_shard_conversion_test_1_old_shard_id, true); - verify_shard_placement_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - -CREATE TABLE single_shard_conversion_test_2 ( - int_col_1 int -); -SELECT citus_add_local_table_to_metadata('single_shard_conversion_test_2'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- save old shardid -SELECT get_shard_id_for_distribution_column('single_node.single_shard_conversion_test_2') AS single_shard_conversion_test_2_old_shard_id \gset -SELECT create_distributed_table('single_shard_conversion_test_2', null, colocate_with=>'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT public.verify_pg_dist_partition_for_single_shard_table('single_node.single_shard_conversion_test_2'); - verify_pg_dist_partition_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - -SELECT public.verify_shard_placement_for_single_shard_table('single_node.single_shard_conversion_test_2', :single_shard_conversion_test_2_old_shard_id, true); - verify_shard_placement_for_single_shard_table ---------------------------------------------------------------------- - t -(1 row) - --- make sure that they're created on different colocation groups -SELECT -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_shard_conversion_test_1'::regclass -) -!= -( - SELECT colocationid FROM pg_dist_partition - WHERE logicalrelid = 'single_node.single_shard_conversion_test_2'::regclass -); - ?column? 
---------------------------------------------------------------------- - t -(1 row) - -SET client_min_messages TO WARNING; -DROP TABLE failover_to_local, single_node_nullkey_c1, single_node_nullkey_c2, ref_table_conversion_test, single_shard_conversion_test_1, single_shard_conversion_test_2; -DROP SCHEMA tenant_1 CASCADE; -RESET client_min_messages; --- so that we don't have to update rest of the test output -SET citus.next_shard_id TO 90630500; -CREATE TABLE ref(x int, y int); -SELECT create_reference_table('ref'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node; - groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced ---------------------------------------------------------------------- - 0 | localhost | 57636 | t | t | t | t -(1 row) - -DROP TABLE ref; --- remove the coordinator to try again with create_reference_table -SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; - master_remove_node ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE loc(x int, y int); -SELECT citus_add_local_table_to_metadata('loc'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node; - groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced ---------------------------------------------------------------------- - 0 | localhost | 57636 | t | t | t | t -(1 row) - -DROP TABLE loc; --- remove the coordinator to try again with create_distributed_table -SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; - master_remove_node ---------------------------------------------------------------------- - -(1 row) - --- verify the coordinator gets auto added with the localhost guc -ALTER SYSTEM SET citus.local_hostname TO '127.0.0.1'; --although not a hostname, should work for connecting locally -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the GUC - pg_sleep ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE test(x int, y int); -SELECT create_distributed_table('test','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node; - groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced ---------------------------------------------------------------------- - 0 | 127.0.0.1 | 57636 | t | t | t | t -(1 row) - -DROP TABLE test; --- remove the coordinator to try again -SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node WHERE groupid = 0; - master_remove_node ---------------------------------------------------------------------- - -(1 row) - -ALTER SYSTEM RESET citus.local_hostname; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(.1); -- wait to make sure the config has changed before running the 
GUC
- pg_sleep
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE TABLE test(x int, y int);
-SELECT create_distributed_table('test','x');
- create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-SELECT groupid, nodename, nodeport, isactive, shouldhaveshards, hasmetadata, metadatasynced FROM pg_dist_node;
- groupid | nodename | nodeport | isactive | shouldhaveshards | hasmetadata | metadatasynced
----------------------------------------------------------------------
- 0 | localhost | 57636 | t | t | t | t
-(1 row)
-
-BEGIN;
- -- we should not enable MX for this temporary node just because
- -- it'd spawn a bg worker targeting this node
- -- and that changes the connection count specific tests
- -- here
- SET LOCAL citus.enable_metadata_sync TO OFF;
- -- cannot add workers with specific IP as long as I have a placeholder coordinator record
- SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
-ERROR: cannot add a worker node when the coordinator hostname is set to localhost
-DETAIL: Worker nodes need to be able to connect to the coordinator to transfer data.
-HINT: Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
-COMMIT;
-BEGIN;
- -- we should not enable MX for this temporary node just because
- -- it'd spawn a bg worker targeting this node
- -- and that changes the connection count specific tests
- -- here
- SET LOCAL citus.enable_metadata_sync TO OFF;
- -- adding localhost workers is ok
- SELECT 1 FROM master_add_node('localhost', :worker_1_port);
-NOTICE: shards are still on the coordinator after adding the new node
-HINT: Use SELECT rebalance_table_shards(); to balance shards data between workers and coordinator or SELECT citus_drain_node('localhost',57636); to permanently move shards away from the coordinator.
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-COMMIT;
--- we don't need this node anymore
-SELECT 1 FROM master_remove_node('localhost', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- set the coordinator host to something different than localhost
-SELECT 1 FROM citus_set_coordinator_host('127.0.0.1');
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-BEGIN;
- -- we should not enable MX for this temporary node just because
- -- it'd spawn a bg worker targeting this node
- -- and that changes the connection count specific tests
- -- here
- SET LOCAL citus.enable_metadata_sync TO OFF;
- -- adding workers with specific IP is ok now
- SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port);
-NOTICE: shards are still on the coordinator after adding the new node
-HINT: Use SELECT rebalance_table_shards(); to balance shards data between workers and coordinator or SELECT citus_drain_node('127.0.0.1',57636); to permanently move shards away from the coordinator.
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
-COMMIT;
--- we don't need this node anymore
-SELECT 1 FROM master_remove_node('127.0.0.1', :worker_1_port);
- ?column?
----------------------------------------------------------------------
- 1
-(1 row)
-
--- set the coordinator host back to localhost for the remainder of tests
-SELECT 1 FROM citus_set_coordinator_host('localhost');
- ?column?
---------------------------------------------------------------------- - 1 -(1 row) - --- should have shards setting should not really matter for a single node -SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -CREATE TYPE new_type AS (n int, m text); -CREATE TABLE test_2(x int, y int, z new_type); -SELECT create_distributed_table('test_2','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE ref(a int, b int); -SELECT create_reference_table('ref'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE local(c int, d int); -CREATE TABLE public.another_schema_table(a int, b int); -SELECT create_distributed_table('public.another_schema_table', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE non_binary_copy_test (key int PRIMARY KEY, value new_type); -SELECT create_distributed_table('non_binary_copy_test', 'key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO non_binary_copy_test SELECT i, (i, 'citus9.5')::new_type FROM generate_series(0,1000)i; --- Confirm the basics work -INSERT INTO test VALUES (1, 2), (3, 4), (5, 6), (2, 7), (4, 5); -SELECT * FROM test WHERE x = 1; - x | y ---------------------------------------------------------------------- - 1 | 2 -(1 row) - -SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT * FROM test ORDER BY x; - x | y ---------------------------------------------------------------------- - 1 | 2 - 2 | 7 - 3 | 4 - 4 | 5 - 5 | 6 -(5 rows) - -UPDATE test SET y = y + 1 RETURNING *; - x | y ---------------------------------------------------------------------- - 1 | 3 - 2 | 8 - 3 | 5 - 4 | 6 - 5 | 7 -(5 rows) - -WITH cte_1 AS (UPDATE test SET y = y - 1 RETURNING *) SELECT * FROM cte_1 ORDER BY 1,2; - x | y ---------------------------------------------------------------------- - 1 | 2 - 2 | 7 - 3 | 4 - 4 | 5 - 5 | 6 -(5 rows) - --- show that we can filter remote commands --- given that citus.grep_remote_commands, we log all commands -SET citus.log_local_commands to true; -SELECT count(*) FROM public.another_schema_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 0 -(1 row) - --- grep matches all commands -SET citus.grep_remote_commands TO "%%"; -SELECT count(*) FROM public.another_schema_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 0 -(1 row) - --- only filter a specific shard for the local execution -BEGIN; - SET LOCAL citus.grep_remote_commands TO "%90630515%"; - SELECT count(*) FROM public.another_schema_table; -NOTICE: executing the command locally: SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE true - count ---------------------------------------------------------------------- - 
0 -(1 row) - - -- match nothing - SET LOCAL citus.grep_remote_commands TO '%nothing%'; - SELECT count(*) FROM public.another_schema_table; - count ---------------------------------------------------------------------- - 0 -(1 row) - -COMMIT; --- only filter a specific shard for the remote execution -BEGIN; - SET LOCAL citus.enable_local_execution TO FALSE; - SET LOCAL citus.grep_remote_commands TO '%90630515%'; - SET LOCAL citus.log_remote_commands TO ON; - SELECT count(*) FROM public.another_schema_table; -NOTICE: issuing SELECT count(*) AS count FROM public.another_schema_table_90630515 another_schema_table WHERE true -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx - count ---------------------------------------------------------------------- - 0 -(1 row) - - -- match nothing - SET LOCAL citus.grep_remote_commands TO '%nothing%'; - SELECT count(*) FROM public.another_schema_table; - count ---------------------------------------------------------------------- - 0 -(1 row) - -COMMIT; -RESET citus.log_local_commands; -RESET citus.grep_remote_commands; --- Test upsert with constraint -CREATE TABLE upsert_test -( - part_key int UNIQUE, - other_col int, - third_col int -); --- distribute the table -SELECT create_distributed_table('upsert_test', 'part_key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- do a regular insert -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2) RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- - 1 | 1 | - 2 | 2 | -(2 rows) - -SET citus.log_remote_commands to true; --- observe that there is a conflict and the following query does nothing -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING RETURNING *; -NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING RETURNING part_key, other_col, third_col - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- same as the above with different syntax -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING RETURNING *; -NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT(part_key) DO NOTHING RETURNING part_key, other_col, third_col - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- again the same query with another syntax -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; -NOTICE: executing the command locally: INSERT INTO single_node.upsert_test_90630523 AS citus_table_alias (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key_90630523 DO NOTHING RETURNING part_key, other_col, third_col - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - -BEGIN; --- force local execution -SELECT count(*) FROM upsert_test WHERE part_key = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.upsert_test_90630523 upsert_test WHERE (part_key OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 1 -(1 
row) - -SET citus.log_remote_commands to false; --- multi-shard pushdown query that goes through local execution -INSERT INTO upsert_test (part_key, other_col) SELECT part_key, other_col FROM upsert_test ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- multi-shard pull-to-coordinator query that goes through local execution -INSERT INTO upsert_test (part_key, other_col) SELECT part_key, other_col FROM upsert_test LIMIT 100 ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - -COMMIT; --- to test citus local tables -select undistribute_table('upsert_test'); -NOTICE: creating a new table for single_node.upsert_test -NOTICE: moving the data of single_node.upsert_test -NOTICE: dropping the old single_node.upsert_test -NOTICE: renaming the new table to single_node.upsert_test - undistribute_table ---------------------------------------------------------------------- - -(1 row) - --- create citus local table -select citus_add_local_table_to_metadata('upsert_test'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - --- test the constraint with local execution -INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING RETURNING *; - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - -DROP TABLE upsert_test; -CREATE TABLE relation_tracking_table_1(id int, nonid int); -SELECT create_distributed_table('relation_tracking_table_1', 'id', colocate_with := 'none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO relation_tracking_table_1 select generate_series(6, 10000, 1), 0; -CREATE or REPLACE function foo() -returns setof relation_tracking_table_1 -AS $$ -BEGIN -RETURN query select * from relation_tracking_table_1 order by 1 limit 10; -end; -$$ language plpgsql; -CREATE TABLE relation_tracking_table_2 (id int, nonid int); --- use the relation-access in this session -select foo(); - foo ---------------------------------------------------------------------- - (6,0) - (7,0) - (8,0) - (9,0) - (10,0) - (11,0) - (12,0) - (13,0) - (14,0) - (15,0) -(10 rows) - --- we should be able to use sequential mode, as the previous multi-shard --- relation access has been cleaned-up -BEGIN; -SET LOCAL citus.multi_shard_modify_mode TO sequential; -INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; -SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. 
-HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT count(*) FROM relation_tracking_table_2; - count ---------------------------------------------------------------------- - 995 -(1 row) - -ROLLBACK; -BEGIN; -INSERT INTO relation_tracking_table_2 select generate_series(6, 1000, 1), 0; -SELECT create_distributed_table('relation_tracking_table_2', 'id', colocate_with := 'none'); -NOTICE: Copying data from local table... -NOTICE: copying the data has completed -DETAIL: The local data in the table is no longer visible, but is still on disk. -HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$single_node.relation_tracking_table_2$$) - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT count(*) FROM relation_tracking_table_2; - count ---------------------------------------------------------------------- - 995 -(1 row) - -COMMIT; -SET client_min_messages TO ERROR; -DROP TABLE relation_tracking_table_2, relation_tracking_table_1 CASCADE; -RESET client_min_messages; -CREATE SCHEMA "Quoed.Schema"; -SET search_path TO "Quoed.Schema"; -CREATE TABLE "long_constraint_upsert\_test" -( - part_key int, - other_col int, - third_col int, - CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" UNIQUE (part_key) -); -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " --- distribute the table and create shards -SELECT create_distributed_table('"long_constraint_upsert\_test"', 'part_key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO "long_constraint_upsert\_test" (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" DO NOTHING RETURNING *; -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " - part_key | other_col | third_col ---------------------------------------------------------------------- - 1 | 1 | -(1 row) - -ALTER TABLE "long_constraint_upsert\_test" RENAME TO simple_table_name; -INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" DO NOTHING RETURNING *; -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " - part_key | other_col | third_col ---------------------------------------------------------------------- -(0 rows) - --- this is currently not supported, but once we support --- make sure that the following query also works fine -ALTER TABLE simple_table_name RENAME CONSTRAINT "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" TO simple_constraint_name; -NOTICE: identifier "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted \aconstraint" will be truncated to "looo oooo ooooo ooooooooooooooooo oooooooo oooooooo ng quoted " -ERROR: renaming constraints belonging to 
distributed tables is currently unsupported ---INSERT INTO simple_table_name (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT simple_constraint_name DO NOTHING RETURNING *; -SET search_path TO single_node; -SET client_min_messages TO ERROR; -DROP SCHEMA "Quoed.Schema" CASCADE; -RESET client_min_messages; --- test partitioned index creation with long name -CREATE TABLE test_index_creation1 -( - tenant_id integer NOT NULL, - timeperiod timestamp without time zone NOT NULL, - field1 integer NOT NULL, - inserted_utc timestamp without time zone NOT NULL DEFAULT now(), - PRIMARY KEY(tenant_id, timeperiod) -) PARTITION BY RANGE (timeperiod); -CREATE TABLE test_index_creation1_p2020_09_26 -PARTITION OF test_index_creation1 FOR VALUES FROM ('2020-09-26 00:00:00') TO ('2020-09-27 00:00:00'); -CREATE TABLE test_index_creation1_p2020_09_27 -PARTITION OF test_index_creation1 FOR VALUES FROM ('2020-09-27 00:00:00') TO ('2020-09-28 00:00:00'); -select create_distributed_table('test_index_creation1', 'tenant_id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- should be able to create indexes with INCLUDE/WHERE -CREATE INDEX ix_test_index_creation5 ON test_index_creation1 - USING btree(tenant_id, timeperiod) - INCLUDE (field1) WHERE (tenant_id = 100); --- test if indexes are created -SELECT 1 AS created WHERE EXISTS(SELECT * FROM pg_indexes WHERE indexname LIKE '%test_index_creation%'); - created ---------------------------------------------------------------------- - 1 -(1 row) - --- test citus size functions in transaction with modification -CREATE TABLE test_citus_size_func (a int); -SELECT create_distributed_table('test_citus_size_func', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO test_citus_size_func VALUES(1), (2); -BEGIN; - -- DDL with citus_table_size - ALTER TABLE test_citus_size_func ADD COLUMN newcol INT; - SELECT citus_table_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- DDL with citus_relation_size - ALTER TABLE test_citus_size_func ADD COLUMN newcol INT; - SELECT citus_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- DDL with citus_total_relation_size - ALTER TABLE test_citus_size_func ADD COLUMN newcol INT; - SELECT citus_total_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- single shard insert with citus_table_size - INSERT INTO test_citus_size_func VALUES (3); - SELECT citus_table_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- multi shard modification with citus_table_size - INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func; - SELECT citus_table_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- single shard insert with citus_relation_size - INSERT INTO test_citus_size_func VALUES (3); - SELECT citus_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks 
which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- multi shard modification with citus_relation_size - INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func; - SELECT citus_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- single shard insert with citus_total_relation_size - INSERT INTO test_citus_size_func VALUES (3); - SELECT citus_total_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; -BEGIN; - -- multi shard modification with citus_total_relation_size - INSERT INTO test_citus_size_func SELECT * FROM test_citus_size_func; - SELECT citus_total_relation_size('test_citus_size_func'); -ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications -ROLLBACK; --- we should be able to limit intermediate results -BEGIN; - SET LOCAL citus.max_intermediate_result_size TO 0; - WITH cte_1 AS (SELECT * FROM test OFFSET 0) SELECT * FROM cte_1; -ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 0 kB) -DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. -HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. -ROLLBACK; --- the first cte (cte_1) does not exceed the limit --- but the second (cte_2) exceeds, so we error out -BEGIN; - SET LOCAL citus.max_intermediate_result_size TO '1kB'; - INSERT INTO test SELECT i,i from generate_series(0,1000)i; - -- only pulls 1 row, should not hit the limit - WITH cte_1 AS (SELECT * FROM test LIMIT 1) SELECT count(*) FROM cte_1; - count ---------------------------------------------------------------------- - 1 -(1 row) - - -- cte_1 only pulls 1 row, but cte_2 all rows - WITH cte_1 AS (SELECT * FROM test LIMIT 1), - cte_2 AS (SELECT * FROM test OFFSET 0) - SELECT count(*) FROM cte_1, cte_2; -ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 1 kB) -DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. -HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. 
-ROLLBACK; --- single shard and multi-shard delete --- inside a transaction block -BEGIN; - DELETE FROM test WHERE y = 5; - INSERT INTO test VALUES (4, 5); - DELETE FROM test WHERE x = 1; - INSERT INTO test VALUES (1, 2); -COMMIT; -CREATE INDEX single_node_i1 ON test(x); -CREATE INDEX single_node_i2 ON test(x,y); -REINDEX SCHEMA single_node; -REINDEX SCHEMA CONCURRENTLY single_node; --- keep one of the indexes --- drop w/wout tx blocks -BEGIN; - DROP INDEX single_node_i2; -ROLLBACK; -DROP INDEX single_node_i2; --- change the schema w/wout TX block -BEGIN; - ALTER TABLE public.another_schema_table SET SCHEMA single_node; -ROLLBACK; -ALTER TABLE public.another_schema_table SET SCHEMA single_node; -BEGIN; - TRUNCATE test; - SELECT * FROM test; - x | y ---------------------------------------------------------------------- -(0 rows) - -ROLLBACK; -VACUUM test; -VACUUM test, test_2; -VACUUM ref, test; -VACUUM ANALYZE test(x); -ANALYZE ref; -ANALYZE test_2; -VACUUM local; -VACUUM local, ref, test, test_2; -VACUUM FULL test, ref; -BEGIN; - ALTER TABLE test ADD COLUMN z INT DEFAULT 66; - SELECT count(*) FROM test WHERE z = 66; - count ---------------------------------------------------------------------- - 5 -(1 row) - -ROLLBACK; --- explain analyze should work on a single node -EXPLAIN (COSTS FALSE, ANALYZE TRUE, TIMING FALSE, SUMMARY FALSE, BUFFERS OFF) - SELECT * FROM test; - QUERY PLAN ---------------------------------------------------------------------- - Custom Scan (Citus Adaptive) (actual rows=5 loops=1) - Task Count: 4 - Tuple data received from nodes: 40 bytes - Tasks Shown: One of 4 - -> Task - Tuple data received from node: 16 bytes - Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on test_90630506 test (actual rows=2 loops=1) -(8 rows) - --- common utility command -SELECT pg_size_pretty(citus_relation_size('test'::regclass)); - pg_size_pretty ---------------------------------------------------------------------- - 24 kB -(1 row) - --- basic view queries -CREATE VIEW single_node_view AS - SELECT count(*) as cnt FROM test t1 JOIN test t2 USING (x); -SELECT * FROM single_node_view; - cnt ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT * FROM single_node_view, test WHERE test.x = single_node_view.cnt; - cnt | x | y ---------------------------------------------------------------------- - 5 | 5 | 6 -(1 row) - --- copy in/out -BEGIN; - COPY test(x) FROM PROGRAM 'seq 32'; - SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 37 -(1 row) - - COPY (SELECT count(DISTINCT x) FROM test) TO STDOUT; -32 - INSERT INTO test SELECT i,i FROM generate_series(0,100)i; -ROLLBACK; --- master_create_empty_shard on coordinator -BEGIN; -CREATE TABLE append_table (a INT, b INT); -SELECT create_distributed_table('append_table','a','append'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT master_create_empty_shard('append_table'); -NOTICE: Creating placements for the append partitioned tables on the coordinator is not supported, skipping coordinator ... 
-ERROR: could only create 0 of 1 of required shard replicas -END; --- alter table inside a tx block -BEGIN; - ALTER TABLE test ADD COLUMN z single_node.new_type; - INSERT INTO test VALUES (99, 100, (1, 'onder')::new_type) RETURNING *; - x | y | z ---------------------------------------------------------------------- - 99 | 100 | (1,onder) -(1 row) - -ROLLBACK; --- prepared statements with custom types -PREPARE single_node_prepare_p1(int, int, new_type) AS - INSERT INTO test_2 VALUES ($1, $2, $3); -EXECUTE single_node_prepare_p1(1, 1, (95, 'citus9.5')::new_type); -EXECUTE single_node_prepare_p1(2 ,2, (94, 'citus9.4')::new_type); -EXECUTE single_node_prepare_p1(3 ,2, (93, 'citus9.3')::new_type); -EXECUTE single_node_prepare_p1(4 ,2, (92, 'citus9.2')::new_type); -EXECUTE single_node_prepare_p1(5 ,2, (91, 'citus9.1')::new_type); -EXECUTE single_node_prepare_p1(6 ,2, (90, 'citus9.0')::new_type); -PREPARE use_local_query_cache(int) AS SELECT count(*) FROM test_2 WHERE x = $1; -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -SET client_min_messages TO DEBUG2; --- the 6th execution will go through the planner --- the 7th execution will skip the planner as it uses the cache -EXECUTE use_local_query_cache(1); -DEBUG: Deferred pruning for a fast-path router query -DEBUG: Creating router plan - count ---------------------------------------------------------------------- - 1 -(1 row) - -EXECUTE use_local_query_cache(1); - count ---------------------------------------------------------------------- - 1 -(1 row) - -RESET client_min_messages; --- partitioned table should be fine, adding for completeness -CREATE TABLE collections_list ( - key bigint, - ts timestamptz DEFAULT now(), - collection_id integer, - value numeric, - PRIMARY KEY(key, collection_id) -) PARTITION BY LIST (collection_id ); -SELECT create_distributed_table('collections_list', 'key'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE collections_list_0 - PARTITION OF collections_list (key, ts, collection_id, value) - FOR VALUES IN ( 0 ); -CREATE TABLE collections_list_1 - PARTITION OF collections_list (key, ts, collection_id, value) - FOR VALUES IN ( 1 ); -INSERT INTO collections_list SELECT i, '2011-01-01', i % 2, i * i FROM generate_series(0, 100) i; -SELECT count(*) FROM collections_list WHERE key < 10 AND collection_id = 1; - count ---------------------------------------------------------------------- - 5 -(1 row) - -SELECT count(*) FROM collections_list_0 WHERE key < 10 AND collection_id = 1; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM collections_list_1 WHERE key = 11; - count ---------------------------------------------------------------------- - 1 -(1 row) - -ALTER TABLE collections_list DROP COLUMN ts; -SELECT * FROM collections_list, collections_list_0 WHERE 
collections_list.key=collections_list_0.key ORDER BY 1 DESC,2 DESC,3 DESC,4 DESC LIMIT 1; - key | collection_id | value | key | collection_id | value ---------------------------------------------------------------------- - 100 | 0 | 10000 | 100 | 0 | 10000 -(1 row) - --- test hash distribution using INSERT with generate_series() function -CREATE OR REPLACE FUNCTION part_hashint4_noop(value int4, seed int8) -RETURNS int8 AS $$ -SELECT value + seed; -$$ LANGUAGE SQL IMMUTABLE; -CREATE OPERATOR CLASS part_test_int4_ops -FOR TYPE int4 -USING HASH AS -operator 1 =, -function 2 part_hashint4_noop(int4, int8); -CREATE TABLE hash_parted ( - a int, - b int -) PARTITION BY HASH (a part_test_int4_ops); -CREATE TABLE hpart0 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 0); -CREATE TABLE hpart1 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 1); -CREATE TABLE hpart2 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 2); -CREATE TABLE hpart3 PARTITION OF hash_parted FOR VALUES WITH (modulus 4, remainder 3); --- Disable metadata sync since citus doesn't support distributing --- operator class for now. -SET citus.enable_metadata_sync TO OFF; -SELECT create_distributed_table('hash_parted ', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO hash_parted VALUES (1, generate_series(1, 10)); -SELECT * FROM hash_parted ORDER BY 1, 2; - a | b ---------------------------------------------------------------------- - 1 | 1 - 1 | 2 - 1 | 3 - 1 | 4 - 1 | 5 - 1 | 6 - 1 | 7 - 1 | 8 - 1 | 9 - 1 | 10 -(10 rows) - -ALTER TABLE hash_parted DETACH PARTITION hpart0; -ALTER TABLE hash_parted DETACH PARTITION hpart1; -ALTER TABLE hash_parted DETACH PARTITION hpart2; -ALTER TABLE hash_parted DETACH PARTITION hpart3; -RESET citus.enable_metadata_sync; --- test range partition without creating partitions and inserting with generate_series() --- should error out even in plain PG since no partition of relation "parent_tab" is found for row --- in Citus it errors out because it fails to evaluate partition key in insert -CREATE TABLE parent_tab (id int) PARTITION BY RANGE (id); -SELECT create_distributed_table('parent_tab', 'id'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO parent_tab VALUES (generate_series(0, 3)); -ERROR: failed to evaluate partition key in insert -HINT: try using constant values for partition column --- now it should work -CREATE TABLE parent_tab_1_2 PARTITION OF parent_tab FOR VALUES FROM (1) to (2); -ALTER TABLE parent_tab ADD COLUMN b int; -INSERT INTO parent_tab VALUES (1, generate_series(0, 3)); -SELECT * FROM parent_tab ORDER BY 1, 2; - id | b ---------------------------------------------------------------------- - 1 | 0 - 1 | 1 - 1 | 2 - 1 | 3 -(4 rows) - --- make sure that parallel accesses are good -SET citus.force_max_query_parallelization TO ON; -SELECT * FROM test_2 ORDER BY 1 DESC; - x | y | z ---------------------------------------------------------------------- - 6 | 2 | (90,citus9.0) - 5 | 2 | (91,citus9.1) - 4 | 2 | (92,citus9.2) - 3 | 2 | (93,citus9.3) - 2 | 2 | (94,citus9.4) - 1 | 1 | (95,citus9.5) -(6 rows) - -DELETE FROM test_2 WHERE y = 1000 RETURNING *; - x | y | z ---------------------------------------------------------------------- -(0 rows) - -RESET citus.force_max_query_parallelization ; -BEGIN; - INSERT INTO test_2 VALUES (7 ,2, (83, 'citus8.3')::new_type); - SAVEPOINT s1; - INSERT INTO 
test_2 VALUES (9 ,1, (82, 'citus8.2')::new_type); - SAVEPOINT s2; - ROLLBACK TO SAVEPOINT s1; - SELECT * FROM test_2 WHERE z = (83, 'citus8.3')::new_type OR z = (82, 'citus8.2')::new_type; - x | y | z ---------------------------------------------------------------------- - 7 | 2 | (83,citus8.3) -(1 row) - - RELEASE SAVEPOINT s1; -COMMIT; -SELECT * FROM test_2 WHERE z = (83, 'citus8.3')::new_type OR z = (82, 'citus8.2')::new_type; - x | y | z ---------------------------------------------------------------------- - 7 | 2 | (83,citus8.3) -(1 row) - --- final query is only intermediate result --- we want PG 11/12/13 behave consistently, the CTEs should be MATERIALIZED -WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1 ORDER BY 1,2; - x | y | z ---------------------------------------------------------------------- - 1 | 1 | (95,citus9.5) - 2 | 2 | (94,citus9.4) - 3 | 2 | (93,citus9.3) - 4 | 2 | (92,citus9.2) - 5 | 2 | (91,citus9.1) - 6 | 2 | (90,citus9.0) - 7 | 2 | (83,citus8.3) -(7 rows) - --- final query is router query -WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1, test_2 WHERE test_2.x = cte_1.x AND test_2.x = 7 ORDER BY 1,2; - x | y | z | x | y | z ---------------------------------------------------------------------- - 7 | 2 | (83,citus8.3) | 7 | 2 | (83,citus8.3) -(1 row) - --- final query is a distributed query -WITH cte_1 AS (SELECT * FROM test_2) SELECT * FROM cte_1, test_2 WHERE test_2.x = cte_1.x AND test_2.y != 2 ORDER BY 1,2; - x | y | z | x | y | z ---------------------------------------------------------------------- - 1 | 1 | (95,citus9.5) | 1 | 1 | (95,citus9.5) -(1 row) - --- query pushdown should work -SELECT - * -FROM - (SELECT x, count(*) FROM test_2 GROUP BY x) as foo, - (SELECT x, count(*) FROM test_2 GROUP BY x) as bar -WHERE - foo.x = bar.x -ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC -LIMIT 1; - x | count | x | count ---------------------------------------------------------------------- - 7 | 1 | 7 | 1 -(1 row) - --- make sure that foreign keys work fine -ALTER TABLE test_2 ADD CONSTRAINT first_pkey PRIMARY KEY (x); -ALTER TABLE test ADD CONSTRAINT foreign_key FOREIGN KEY (x) REFERENCES test_2(x) ON DELETE CASCADE; --- show that delete on test_2 cascades to test -SELECT * FROM test WHERE x = 5; - x | y ---------------------------------------------------------------------- - 5 | 6 -(1 row) - -DELETE FROM test_2 WHERE x = 5; -SELECT * FROM test WHERE x = 5; - x | y ---------------------------------------------------------------------- -(0 rows) - -INSERT INTO test_2 VALUES (5 ,2, (91, 'citus9.1')::new_type); -INSERT INTO test VALUES (5, 6); -INSERT INTO ref VALUES (1, 2), (5, 6), (7, 8); -SELECT count(*) FROM ref; - count ---------------------------------------------------------------------- - 3 -(1 row) - -SELECT * FROM ref ORDER BY a; - a | b ---------------------------------------------------------------------- - 1 | 2 - 5 | 6 - 7 | 8 -(3 rows) - -SELECT * FROM test, ref WHERE x = a ORDER BY x; - x | y | a | b ---------------------------------------------------------------------- - 1 | 2 | 1 | 2 - 5 | 6 | 5 | 6 -(2 rows) - -INSERT INTO local VALUES (1, 2), (3, 4), (7, 8); -SELECT count(*) FROM local; - count ---------------------------------------------------------------------- - 3 -(1 row) - -SELECT * FROM local ORDER BY c; - c | d ---------------------------------------------------------------------- - 1 | 2 - 3 | 4 - 7 | 8 -(3 rows) - -SELECT * FROM ref, local WHERE a = c ORDER BY a; - a | b | c | d 
---------------------------------------------------------------------- - 1 | 2 | 1 | 2 - 7 | 8 | 7 | 8 -(2 rows) - --- Check repartition joins are supported -SET citus.enable_repartition_joins TO ON; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.enable_single_hash_repartition_joins TO ON; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET search_path TO public; -SET citus.enable_single_hash_repartition_joins TO OFF; -SELECT * FROM single_node.test t1, single_node.test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.enable_single_hash_repartition_joins TO ON; -SELECT * FROM single_node.test t1, single_node.test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET search_path TO single_node; -SET citus.task_assignment_policy TO 'round-robin'; -SET citus.enable_single_hash_repartition_joins TO ON; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.task_assignment_policy TO 'greedy'; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -SET citus.task_assignment_policy TO 'first-replica'; -SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x; - x | y | x | y ---------------------------------------------------------------------- - 2 | 7 | 1 | 2 - 4 | 5 | 3 | 4 - 5 | 6 | 4 | 5 -(3 rows) - -RESET citus.enable_repartition_joins; -RESET citus.enable_single_hash_repartition_joins; --- INSERT SELECT router -BEGIN; -INSERT INTO test(x, y) SELECT x, y FROM test WHERE x = 1; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT pushdown -BEGIN; -INSERT INTO test(x, y) SELECT x, y FROM test; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 10 -(1 row) - -ROLLBACK; --- INSERT SELECT analytical query -BEGIN; -INSERT INTO test(x, y) SELECT count(x), max(y) FROM test; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT repartition -BEGIN; -INSERT INTO test(x, y) SELECT y, x FROM test; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 10 -(1 row) - -ROLLBACK; --- INSERT SELECT from reference table into distributed -BEGIN; -INSERT INTO test(x, y) SELECT a, b FROM ref; -SELECT count(*) from test; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from local table into distributed -BEGIN; -INSERT INTO test(x, y) SELECT c, d FROM local; -SELECT count(*) from test; - count 
---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT x, y FROM test; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT c, d FROM local; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT x, y FROM test; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT a, b FROM ref; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- Confirm that dummy placements work -SELECT count(*) FROM test WHERE false; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y); - count ---------------------------------------------------------------------- -(0 rows) - --- Confirm that they work with round-robin task assignment policy -SET citus.task_assignment_policy TO 'round-robin'; -SELECT count(*) FROM test WHERE false; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM test WHERE false GROUP BY GROUPING SETS (x,y); - count ---------------------------------------------------------------------- -(0 rows) - -RESET citus.task_assignment_policy; -SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 5 -(1 row) - --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT x, y FROM test; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO ref(a, b) SELECT c, d FROM local; -SELECT count(*) from ref; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT x, y FROM test; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 8 -(1 row) - -ROLLBACK; --- INSERT SELECT from distributed table to local table -BEGIN; -INSERT INTO local(c, d) SELECT a, b FROM ref; -SELECT count(*) from local; - count ---------------------------------------------------------------------- - 6 -(1 row) - -ROLLBACK; --- query fails on the shards should be handled --- nicely -SELECT x/0 FROM test; -ERROR: division by zero -CONTEXT: while executing command on localhost:xxxxx --- Add "fake" pg_dist_transaction records and run recovery --- to show that it is recovered --- Temporarily disable automatic 2PC recovery -ALTER SYSTEM SET citus.recover_2pc_interval TO -1; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -BEGIN; -CREATE TABLE should_commit (value int); -PREPARE TRANSACTION 'citus_0_should_commit'; --- zero is the 
coordinator's group id, so we can hard code it -INSERT INTO pg_dist_transaction VALUES (0, 'citus_0_should_commit'); -SELECT recover_prepared_transactions(); - recover_prepared_transactions ---------------------------------------------------------------------- - 1 -(1 row) - --- the table should be seen -SELECT * FROM should_commit; - value ---------------------------------------------------------------------- -(0 rows) - --- set the original back -ALTER SYSTEM RESET citus.recover_2pc_interval; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -RESET citus.task_executor_type; --- make sure undistribute table works fine -ALTER TABLE test DROP CONSTRAINT foreign_key; -SELECT undistribute_table('test_2'); -NOTICE: creating a new table for single_node.test_2 -NOTICE: moving the data of single_node.test_2 -NOTICE: dropping the old single_node.test_2 -NOTICE: renaming the new table to single_node.test_2 - undistribute_table ---------------------------------------------------------------------- - -(1 row) - -SELECT * FROM pg_dist_partition WHERE logicalrelid = 'test_2'::regclass; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted ---------------------------------------------------------------------- -(0 rows) - -CREATE TABLE reference_table_1 (col_1 INT UNIQUE, col_2 INT UNIQUE, UNIQUE (col_2, col_1)); -SELECT create_reference_table('reference_table_1'); - create_reference_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE distributed_table_1 (col_1 INT UNIQUE); -SELECT create_distributed_table('distributed_table_1', 'col_1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE citus_local_table_1 (col_1 INT UNIQUE); -SELECT citus_add_local_table_to_metadata('citus_local_table_1'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE partitioned_table_1 (col_1 INT UNIQUE, col_2 INT) PARTITION BY RANGE (col_1); -CREATE TABLE partitioned_table_1_100_200 PARTITION OF partitioned_table_1 FOR VALUES FROM (100) TO (200); -CREATE TABLE partitioned_table_1_200_300 PARTITION OF partitioned_table_1 FOR VALUES FROM (200) TO (300); -SELECT create_distributed_table('partitioned_table_1', 'col_1'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -ALTER TABLE citus_local_table_1 ADD CONSTRAINT fkey_1 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2); -ALTER TABLE reference_table_1 ADD CONSTRAINT fkey_2 FOREIGN KEY (col_2) REFERENCES reference_table_1(col_1); -ALTER TABLE distributed_table_1 ADD CONSTRAINT fkey_3 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1); -ALTER TABLE citus_local_table_1 ADD CONSTRAINT fkey_4 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2); -ALTER TABLE partitioned_table_1 ADD CONSTRAINT fkey_5 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_2); -SELECT undistribute_table('partitioned_table_1', cascade_via_foreign_keys=>true); -NOTICE: converting the partitions of single_node.partitioned_table_1 -NOTICE: creating a new table for single_node.partitioned_table_1 -NOTICE: dropping the old single_node.partitioned_table_1 -NOTICE: renaming the new table to single_node.partitioned_table_1 -NOTICE: creating a new table for single_node.reference_table_1 -NOTICE: moving the data of 
single_node.reference_table_1 -NOTICE: dropping the old single_node.reference_table_1 -NOTICE: renaming the new table to single_node.reference_table_1 -NOTICE: creating a new table for single_node.distributed_table_1 -NOTICE: moving the data of single_node.distributed_table_1 -NOTICE: dropping the old single_node.distributed_table_1 -NOTICE: renaming the new table to single_node.distributed_table_1 -NOTICE: creating a new table for single_node.citus_local_table_1 -NOTICE: moving the data of single_node.citus_local_table_1 -NOTICE: dropping the old single_node.citus_local_table_1 -NOTICE: renaming the new table to single_node.citus_local_table_1 -NOTICE: creating a new table for single_node.partitioned_table_1_100_200 -NOTICE: moving the data of single_node.partitioned_table_1_100_200 -NOTICE: dropping the old single_node.partitioned_table_1_100_200 -NOTICE: renaming the new table to single_node.partitioned_table_1_100_200 -NOTICE: creating a new table for single_node.partitioned_table_1_200_300 -NOTICE: moving the data of single_node.partitioned_table_1_200_300 -NOTICE: dropping the old single_node.partitioned_table_1_200_300 -NOTICE: renaming the new table to single_node.partitioned_table_1_200_300 - undistribute_table ---------------------------------------------------------------------- - -(1 row) - -CREATE TABLE local_table_1 (col_1 INT UNIQUE); -CREATE TABLE local_table_2 (col_1 INT UNIQUE); -CREATE TABLE local_table_3 (col_1 INT UNIQUE); -ALTER TABLE local_table_2 ADD CONSTRAINT fkey_6 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1); -ALTER TABLE local_table_3 ADD CONSTRAINT fkey_7 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1); -ALTER TABLE local_table_1 ADD CONSTRAINT fkey_8 FOREIGN KEY (col_1) REFERENCES local_table_1(col_1); -SELECT citus_add_local_table_to_metadata('local_table_2', cascade_via_foreign_keys=>true); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -CREATE PROCEDURE call_delegation(x int) LANGUAGE plpgsql AS $$ -BEGIN - INSERT INTO test (x) VALUES ($1); -END;$$; -SELECT * FROM pg_dist_node; - nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards ---------------------------------------------------------------------- - 5 | 0 | localhost | 57636 | default | t | t | primary | default | t | t -(1 row) - -SELECT create_distributed_function('call_delegation(int)', '$1', 'test'); - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - -CREATE FUNCTION function_delegation(int) RETURNS void AS $$ -BEGIN -UPDATE test SET y = y + 1 WHERE x < $1; -END; -$$ LANGUAGE plpgsql; -SELECT create_distributed_function('function_delegation(int)', '$1', 'test'); - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - -SET client_min_messages TO DEBUG1; -CALL call_delegation(1); -DEBUG: not pushing down procedure to the same node -SELECT function_delegation(1); -DEBUG: not pushing down function to the same node - function_delegation ---------------------------------------------------------------------- - -(1 row) - -SET client_min_messages TO WARNING; -DROP TABLE test CASCADE; -CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_client_backend_count() - RETURNS bigint - LANGUAGE C STRICT - AS 'citus', $$get_all_active_client_backend_count$$; --- set the cached connections to zero --- and execute a distributed query 
so that --- we end up with zero cached connections afterwards -ALTER SYSTEM SET citus.max_cached_conns_per_worker TO 0; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- disable deadlock detection and re-trigger 2PC recovery --- once more when citus.max_cached_conns_per_worker is zero --- so that we can be sure that the connections established for --- maintanince daemon is closed properly. --- this is to prevent random failures in the tests (otherwise, we --- might see connections established for this operations) -ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO -1; -ALTER SYSTEM SET citus.recover_2pc_interval TO '1ms'; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(0.1); - pg_sleep ---------------------------------------------------------------------- - -(1 row) - --- now that last 2PC recovery is done, we're good to disable it -ALTER SYSTEM SET citus.recover_2pc_interval TO '-1'; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- test alter_distributed_table UDF -CREATE TABLE adt_table (a INT, b INT); -CREATE TABLE adt_col (a INT UNIQUE, b INT); -CREATE TABLE adt_ref (a INT REFERENCES adt_col(a)); -SELECT create_distributed_table('adt_table', 'a', colocate_with:='none'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('adt_col', 'a', colocate_with:='adt_table'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT create_distributed_table('adt_ref', 'a', colocate_with:='adt_table'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -INSERT INTO adt_table VALUES (1, 2), (3, 4), (5, 6); -INSERT INTO adt_col VALUES (3, 4), (5, 6), (7, 8); -INSERT INTO adt_ref VALUES (3), (5); -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%'; - table_name | citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_col | distributed | a | 4 - adt_ref | distributed | a | 4 - adt_table | distributed | a | 4 -(3 rows) - -SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1; - Colocation Groups ---------------------------------------------------------------------- - adt_col, adt_ref, adt_table -(1 row) - -SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint - WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1; - Referencing Table | Definition ---------------------------------------------------------------------- - adt_col | UNIQUE (a) - adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a) -(2 rows) - -SELECT alter_distributed_table('adt_table', shard_count:=6, cascade_to_colocated:=true); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%'; - table_name | 
citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_col | distributed | a | 6 - adt_ref | distributed | a | 6 - adt_table | distributed | a | 6 -(3 rows) - -SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1; - Colocation Groups ---------------------------------------------------------------------- - adt_col, adt_ref, adt_table -(1 row) - -SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint - WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1; - Referencing Table | Definition ---------------------------------------------------------------------- - adt_col | UNIQUE (a) - adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a) -(2 rows) - -SELECT alter_distributed_table('adt_table', distribution_column:='b', colocate_with:='none'); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text LIKE 'adt%'; - table_name | citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_col | distributed | a | 6 - adt_ref | distributed | a | 6 - adt_table | distributed | b | 6 -(3 rows) - -SELECT STRING_AGG(table_name::text, ', ' ORDER BY 1) AS "Colocation Groups" FROM public.citus_tables WHERE table_name::text LIKE 'adt%' GROUP BY colocation_id ORDER BY 1; - Colocation Groups ---------------------------------------------------------------------- - adt_col, adt_ref - adt_table -(2 rows) - -SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint - WHERE (conrelid::regclass::text = 'adt_col' OR confrelid::regclass::text = 'adt_col') ORDER BY 1; - Referencing Table | Definition ---------------------------------------------------------------------- - adt_col | UNIQUE (a) - adt_ref | FOREIGN KEY (a) REFERENCES adt_col(a) -(2 rows) - -SELECT * FROM adt_table ORDER BY 1; - a | b ---------------------------------------------------------------------- - 1 | 2 - 3 | 4 - 5 | 6 -(3 rows) - -SELECT * FROM adt_col ORDER BY 1; - a | b ---------------------------------------------------------------------- - 3 | 4 - 5 | 6 - 7 | 8 -(3 rows) - -SELECT * FROM adt_ref ORDER BY 1; - a ---------------------------------------------------------------------- - 3 - 5 -(2 rows) - --- make sure that COPY (e.g., INSERT .. 
SELECT) and --- alter_distributed_table works in the same TX -BEGIN; -SET LOCAL citus.enable_local_execution=OFF; -INSERT INTO adt_table SELECT x, x+1 FROM generate_series(1, 1000) x; -SELECT alter_distributed_table('adt_table', distribution_column:='a'); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -ROLLBACK; -BEGIN; -INSERT INTO adt_table SELECT x, x+1 FROM generate_series(1, 1000) x; -SELECT alter_distributed_table('adt_table', distribution_column:='a'); - alter_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT COUNT(*) FROM adt_table; - count ---------------------------------------------------------------------- - 1003 -(1 row) - -END; -SELECT table_name, citus_table_type, distribution_column, shard_count FROM public.citus_tables WHERE table_name::text = 'adt_table'; - table_name | citus_table_type | distribution_column | shard_count ---------------------------------------------------------------------- - adt_table | distributed | a | 6 -(1 row) - -\c - - - :master_port --- sometimes Postgres is a little slow to terminate the backends --- even if PGFinish is sent. So, to prevent any flaky tests, sleep -SELECT pg_sleep(0.1); - pg_sleep ---------------------------------------------------------------------- - -(1 row) - --- since max_cached_conns_per_worker == 0 at this point, the --- backend(s) that execute on the shards will be terminated --- so show that there no internal backends -SET search_path TO single_node; -SET citus.next_shard_id TO 90730500; -SELECT count(*) from should_commit; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'citus_internal%'; - count ---------------------------------------------------------------------- - 0 -(1 row) - -SELECT get_all_active_client_backend_count(); - get_all_active_client_backend_count ---------------------------------------------------------------------- - 1 -(1 row) - -BEGIN; - SET LOCAL citus.shard_count TO 32; - SET LOCAL citus.force_max_query_parallelization TO ON; - SET LOCAL citus.enable_local_execution TO false; - CREATE TABLE test (a int); - SET citus.shard_replication_factor TO 1; - SELECT create_distributed_table('test', 'a'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - - SELECT count(*) FROM test; - count ---------------------------------------------------------------------- - 0 -(1 row) - - -- now, we should have additional 32 connections - SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'citus_internal%'; - count ---------------------------------------------------------------------- - 32 -(1 row) - - -- single external connection - SELECT get_all_active_client_backend_count(); - get_all_active_client_backend_count ---------------------------------------------------------------------- - 1 -(1 row) - -ROLLBACK; -\c - - - :master_port -SET search_path TO single_node; -SET citus.next_shard_id TO 90830500; --- simulate that even if there is no connection slots --- to connect, Citus can switch to local execution -SET citus.force_max_query_parallelization TO false; -SET citus.log_remote_commands TO ON; -ALTER SYSTEM SET citus.local_shared_pool_size TO -1; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - -SELECT pg_sleep(0.1); - pg_sleep 
---------------------------------------------------------------------- - -(1 row) - -SET citus.executor_slow_start_interval TO 10; -SELECT count(*) from another_schema_table; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630516 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630517 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630518 another_schema_table WHERE true - count ---------------------------------------------------------------------- - 0 -(1 row) - -UPDATE another_schema_table SET b = b; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = b --- INSERT .. SELECT pushdown and INSERT .. SELECT via repartitioning --- not that we ignore INSERT .. SELECT via coordinator as it relies on --- COPY command -INSERT INTO another_schema_table SELECT * FROM another_schema_table; -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (a IS NOT NULL) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE (a IS NOT NULL) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a IS NOT NULL) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE (a IS NOT NULL) -INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table; -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630515_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630515_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the 
command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630517_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630517_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630518_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630518_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 --- multi-row INSERTs -INSERT INTO another_schema_table VALUES (1,1), (2,2), (3,3), (4,4), (5,5),(6,6),(7,7); -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) VALUES (1,1), (5,5) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) VALUES (3,3), (4,4), (7,7) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) VALUES (6,6) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) VALUES (2,2) --- INSERT..SELECT with re-partitioning when using local execution -BEGIN; -INSERT INTO another_schema_table VALUES (1,100); -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 (a, b) VALUES (1, 100) -INSERT INTO another_schema_table VALUES (2,100); -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 (a, b) VALUES (2, 100) -INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table; -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630515_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630515_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630516_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630516_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630517_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630517_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing 
the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_90630518_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_90630518_to','SELECT b AS a, a AS b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true',0,'hash','{-2147483648,-1073741824,0,1073741824}'::text[],'{-1073741825,-1,1073741823,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630516_to_1}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630515_to_2,repartitioned_results_xxxxx_from_90630517_to_2,repartitioned_results_xxxxx_from_90630518_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_results('{repartitioned_results_xxxxx_from_90630518_to_3}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer, b integer) -SELECT * FROM another_schema_table WHERE a = 100 ORDER BY b; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 100) ORDER BY b - a | b ---------------------------------------------------------------------- - 100 | 1 - 100 | 2 -(2 rows) - -ROLLBACK; --- intermediate results -WITH cte_1 AS (SELECT * FROM another_schema_table LIMIT 1000) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count ---------------------------------------------------------------------- - 7 -(1 row) - --- this is to get ready for the next tests -TRUNCATE another_schema_table; -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: 
executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE --- copy can use local execution even if there is no connection available -COPY another_schema_table(a) FROM PROGRAM 'seq 32'; -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 1: "1" -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 2: "2" -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 3: "3" -NOTICE: executing the copy locally for shard xxxxx -CONTEXT: COPY another_schema_table, line 6: "6" --- INSERT .. SELECT with co-located intermediate results -SET citus.log_remote_commands to false; -CREATE UNIQUE INDEX another_schema_table_pk ON another_schema_table(a); -SET citus.log_local_commands to true; -INSERT INTO another_schema_table SELECT * FROM another_schema_table LIMIT 10000 ON CONFLICT(a) DO NOTHING; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING -INSERT INTO another_schema_table SELECT * FROM another_schema_table ORDER BY a LIMIT 10 ON CONFLICT(a) DO UPDATE SET b = EXCLUDED.b + 1 RETURNING *; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b 
FROM single_node.another_schema_table_90630517 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true ORDER BY a LIMIT '10'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO UPDATE SET b = (excluded.b OPERATOR(pg_catalog.+) 1) RETURNING citus_table_alias.a, citus_table_alias.b - a | b ---------------------------------------------------------------------- - 1 | - 2 | - 3 | - 4 | - 5 | - 6 | - 7 | - 8 | - 9 | - 10 | -(10 rows) - --- INSERT .. 
SELECT with co-located intermediate result for non-binary input -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING value) -SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value) SELECT key, value FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.value -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'text'::citus_copy_format) intermediate_result(value single_node.new_type)) cte_1 - count ---------------------------------------------------------------------- - 1001 -(1 row) - --- test with NULL columns -ALTER TABLE non_binary_copy_test ADD COLUMN z INT; -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630519, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630520, 'single_node', 'ALTER TABLE 
non_binary_copy_test ADD COLUMN z integer;') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630521, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90630522, 'single_node', 'ALTER TABLE non_binary_copy_test ADD COLUMN z integer;') -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z) -SELECT bool_and(z is null) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1 - bool_and ---------------------------------------------------------------------- - 
t -(1 row) - --- test with type coersion (int -> text) and also NULL values with coersion -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING key, z) -SELECT count(DISTINCT key::text), count(DISTINCT z::text) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.key, citus_table_alias.z -NOTICE: executing the command locally: SELECT count(DISTINCT (key)::text) AS count, count(DISTINCT (z)::text) AS count FROM (SELECT intermediate_result.key, intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, z integer)) cte_1 - count | count ---------------------------------------------------------------------- - 1001 | 0 -(1 row) - --- test disabling drop and truncate for known 
shards -SET citus.shard_replication_factor TO 1; -CREATE TABLE test_disabling_drop_and_truncate (a int); -SELECT create_distributed_table('test_disabling_drop_and_truncate', 'a'); -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830500, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830500, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830501, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830501, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830502, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830502, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830503, 'single_node', 'CREATE TABLE single_node.test_disabling_drop_and_truncate (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830503, 'single_node', 'ALTER TABLE single_node.test_disabling_drop_and_truncate OWNER TO postgres') - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SET citus.enable_manual_changes_to_shards TO off; --- these should error out -DROP TABLE test_disabling_drop_and_truncate_90830500; -ERROR: cannot modify "test_disabling_drop_and_truncate_90830500" because it is a shard of a distributed table -HINT: Use the distributed table or set citus.enable_manual_changes_to_shards to on to modify shards directly -TRUNCATE TABLE test_disabling_drop_and_truncate_90830500; -ERROR: cannot modify "test_disabling_drop_and_truncate_90830500" because it is a shard of a distributed table -HINT: Use the distributed table or set citus.enable_manual_changes_to_shards to on to modify shards directly -RESET citus.enable_manual_changes_to_shards ; --- these should work as expected -TRUNCATE TABLE test_disabling_drop_and_truncate_90830500; -DROP TABLE test_disabling_drop_and_truncate_90830500; -DROP TABLE test_disabling_drop_and_truncate; --- test creating distributed or reference tables from shards -CREATE TABLE test_creating_distributed_relation_table_from_shard (a int); -SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard', 'a'); -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830504, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830504, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830505, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830505, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830506, 
'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830506, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') -NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (90830507, 'single_node', 'CREATE TABLE single_node.test_creating_distributed_relation_table_from_shard (a integer) USING heap');SELECT worker_apply_shard_ddl_command (90830507, 'single_node', 'ALTER TABLE single_node.test_creating_distributed_relation_table_from_shard OWNER TO postgres') - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - --- these should error because shards cannot be used to: --- create distributed table -SELECT create_distributed_table('test_creating_distributed_relation_table_from_shard_90830504', 'a'); -ERROR: relation "test_creating_distributed_relation_table_from_shard_90830504" is a shard relation --- create reference table -SELECT create_reference_table('test_creating_distributed_relation_table_from_shard_90830504'); -ERROR: relation "test_creating_distributed_relation_table_from_shard_90830504" is a shard relation -RESET citus.shard_replication_factor; -DROP TABLE test_creating_distributed_relation_table_from_shard; --- lets flush the copy often to make sure everyhing is fine -SET citus.local_copy_flush_threshold TO 1; -TRUNCATE another_schema_table; -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -NOTICE: executing the command locally: TRUNCATE TABLE single_node.another_schema_table_xxxxx CASCADE -INSERT INTO another_schema_table(a) SELECT i from generate_Series(0,10000)i; -NOTICE: executing the copy locally for shard xxxxx -NOTICE: executing the copy locally for shard xxxxx -NOTICE: executing the copy locally for shard xxxxx -NOTICE: executing the copy locally for shard xxxxx -WITH cte_1 AS -(INSERT INTO another_schema_table SELECT * FROM another_schema_table ORDER BY a LIMIT 10000 ON CONFLICT(a) DO NOTHING RETURNING *) -SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true ORDER BY a LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630515 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630515'::text, 'binary'::citus_copy_format) 
intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630516 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630516'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630517 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630517'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: INSERT INTO single_node.another_schema_table_90630518 AS citus_table_alias (a, b) SELECT a, b FROM read_intermediate_result('insert_select_XXX_90630518'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer) ON CONFLICT(a) DO NOTHING RETURNING citus_table_alias.a, citus_table_alias.b -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count ---------------------------------------------------------------------- - 0 -(1 row) - -WITH cte_1 AS -(INSERT INTO non_binary_copy_test SELECT * FROM non_binary_copy_test LIMIT 10000 ON CONFLICT (key) DO UPDATE SET value = (0, 'citus0')::new_type RETURNING z) -SELECT bool_and(z is null) FROM cte_1; -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630519 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630520 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630521 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the command locally: SELECT key, value, z FROM single_node.non_binary_copy_test_90630522 non_binary_copy_test WHERE true LIMIT '10000'::bigint -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the copy locally for colocated file with shard xxxxx -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630519 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630519'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630520 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630520'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: 
INSERT INTO single_node.non_binary_copy_test_90630521 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630521'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: INSERT INTO single_node.non_binary_copy_test_90630522 AS citus_table_alias (key, value, z) SELECT key, value, z FROM read_intermediate_result('insert_select_XXX_90630522'::text, 'text'::citus_copy_format) intermediate_result(key integer, value single_node.new_type, z integer) ON CONFLICT(key) DO UPDATE SET value = ROW(0, 'citus0'::text)::single_node.new_type RETURNING citus_table_alias.z -NOTICE: executing the command locally: SELECT bool_and((z IS NULL)) AS bool_and FROM (SELECT intermediate_result.z FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(z integer)) cte_1 - bool_and ---------------------------------------------------------------------- - t -(1 row) - -RESET citus.local_copy_flush_threshold; -RESET citus.local_copy_flush_threshold; -CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() -RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', -$$coordinated_transaction_should_use_2PC$$; --- a multi-shard/single-shard select that is failed over to local --- execution doesn't start a 2PC -BEGIN; - SELECT count(*) FROM another_schema_table; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630516 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630517 another_schema_table WHERE true -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630518 another_schema_table WHERE true - count ---------------------------------------------------------------------- - 10001 -(1 row) - - SELECT count(*) FROM another_schema_table WHERE a = 1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM single_node.another_schema_table_90630515 another_schema_table WHERE (a OPERATOR(pg_catalog.=) 1) - count ---------------------------------------------------------------------- - 1 -(1 row) - - WITH cte_1 as (SELECT * FROM another_schema_table LIMIT 10) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT a, b FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '10'::bigint -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count 
---------------------------------------------------------------------- - 10 -(1 row) - - WITH cte_1 as (SELECT * FROM another_schema_table WHERE a = 1 LIMIT 10) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT another_schema_table.a, another_schema_table.b FROM single_node.another_schema_table_90630515 another_schema_table WHERE (another_schema_table.a OPERATOR(pg_catalog.=) 1) LIMIT 10) cte_1 - count ---------------------------------------------------------------------- - 1 -(1 row) - - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - f -(1 row) - -ROLLBACK; --- same without a transaction block -WITH cte_1 AS (SELECT count(*) as cnt FROM another_schema_table LIMIT 1000), - cte_2 AS (SELECT coordinated_transaction_should_use_2PC() as enabled_2pc) -SELECT cnt, enabled_2pc FROM cte_1, cte_2; -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630515 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630516 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630517 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT count(*) AS cnt FROM single_node.another_schema_table_90630518 another_schema_table WHERE true LIMIT '1000'::bigint -NOTICE: executing the command locally: SELECT cte_1.cnt, cte_2.enabled_2pc FROM (SELECT intermediate_result.cnt FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(cnt bigint)) cte_1, (SELECT intermediate_result.enabled_2pc FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(enabled_2pc boolean)) cte_2 - cnt | enabled_2pc ---------------------------------------------------------------------- - 10001 | f -(1 row) - --- a multi-shard modification that is failed over to local --- execution starts a 2PC -BEGIN; - UPDATE another_schema_table SET b = b + 1; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - -ROLLBACK; --- a multi-shard modification that is failed over to local --- execution starts a 2PC -BEGIN; - WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 RETURNING *) - SELECT count(*) FROM cte_1; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) 
RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte_1 - count ---------------------------------------------------------------------- - 10001 -(1 row) - - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - -ROLLBACK; --- same without transaction block -WITH cte_1 AS (UPDATE another_schema_table SET b = b + 1 RETURNING *) -SELECT coordinated_transaction_should_use_2PC(); -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630516 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630517 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630518 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) RETURNING a, b -NOTICE: executing the command locally: SELECT single_node.coordinated_transaction_should_use_2pc() AS coordinated_transaction_should_use_2pc - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - --- a single-shard modification that is failed over to local --- starts 2PC execution -BEGIN; - UPDATE another_schema_table SET b = b + 1 WHERE a = 1; -NOTICE: executing the command locally: UPDATE single_node.another_schema_table_90630515 another_schema_table SET b = (b OPERATOR(pg_catalog.+) 1) WHERE (a OPERATOR(pg_catalog.=) 1) - SELECT coordinated_transaction_should_use_2PC(); - coordinated_transaction_should_use_2pc ---------------------------------------------------------------------- - t -(1 row) - -ROLLBACK; --- if the local execution is disabled, we cannot failover to --- local execution and the queries would fail -SET citus.enable_local_execution TO false; -SELECT count(*) from another_schema_table; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -UPDATE another_schema_table SET b = b; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -INSERT INTO another_schema_table SELECT * FROM another_schema_table; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. 
Consider enabling local execution using SET citus.enable_local_execution TO true; -INSERT INTO another_schema_table SELECT b::int, a::int FROM another_schema_table; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -WITH cte_1 AS (SELECT * FROM another_schema_table LIMIT 1000) - SELECT count(*) FROM cte_1; -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; -INSERT INTO another_schema_table VALUES (1,1), (2,2), (3,3), (4,4), (5,5),(6,6),(7,7); -ERROR: the total number of connections on the server is more than max_connections(100) -HINT: This command supports local execution. Consider enabling local execution using SET citus.enable_local_execution TO true; --- copy fails if local execution is disabled and there is no connection slot -COPY another_schema_table(a) FROM PROGRAM 'seq 32'; -ERROR: could not find an available connection -HINT: Set citus.max_shared_pool_size TO -1 to let COPY command finish -CONTEXT: COPY another_schema_table, line 1: "1" --- set the values to originals back -ALTER SYSTEM RESET citus.max_cached_conns_per_worker; -ALTER SYSTEM RESET citus.distributed_deadlock_detection_factor; -ALTER SYSTEM RESET citus.recover_2pc_interval; -ALTER SYSTEM RESET citus.distributed_deadlock_detection_factor; -ALTER SYSTEM RESET citus.local_shared_pool_size; -SELECT pg_reload_conf(); - pg_reload_conf ---------------------------------------------------------------------- - t -(1 row) - --- suppress notices -SET client_min_messages TO error; --- cannot remove coordinator since a reference table exists on coordinator and no other worker nodes are added -SELECT 1 FROM master_remove_node('localhost', :master_port); -ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx -DETAIL: One of the table(s) that prevents the operation complete successfully is single_node.ref -HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables --- Cleanup -DROP SCHEMA single_node CASCADE; --- Remove the coordinator again -SELECT 1 FROM master_remove_node('localhost', :master_port); - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - --- restart nodeid sequence so that multi_cluster_management still has the same --- nodeids -ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 1; diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql index 40d1b9e09..4f57f9112 100644 --- a/src/test/regress/sql/create_drop_database_propagation_pg15.sql +++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql @@ -1,14 +1,3 @@ --- --- PG15 --- -SHOW server_version \gset -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif - -- create/drop database for pg >= 15 set citus.enable_create_database_propagation=on; diff --git a/src/test/regress/sql/merge_unsupported.sql b/src/test/regress/sql/merge_unsupported.sql index ef95e01ea..9903fd6a5 100644 --- a/src/test/regress/sql/merge_unsupported.sql +++ b/src/test/regress/sql/merge_unsupported.sql @@ -1,18 +1,9 @@ - - SHOW server_version \gset SELECT CASE WHEN substring(current_setting('server_version'), '\d+')::int >= 17 THEN '17+' WHEN substring(current_setting('server_version'), '\d+')::int IN (15, 16) THEN '15_16' - WHEN substring(current_setting('server_version'), '\d+')::int = 14 THEN '14' ELSE 'Unsupported version' END AS version_category; -SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 -\gset -\if :server_version_ge_15 -\else -\q -\endif -- -- MERGE test from PG community (adapted to Citus by converting all tables to Citus local)