Change test files in multi and multi-1 schedules to accommodate the coordinator in the metadata. (#6939)

Changes test files in the multi and multi-1 schedules so that they
accommodate the coordinator in the metadata.

Changes fall into the following buckets:

1. When the coordinator is in the metadata, reference table shards are also
placed on the coordinator.
This changes test outputs that check table sizes, shard counts, etc.
for reference tables.

2. When the coordinator is in the metadata, postgres tables are converted to
citus local tables whenever a foreign key referencing them is created.
This changes some test cases that verified it was not possible to create
foreign keys to postgres tables (see the sketch after this list).

3. Removes lines that add/remove the coordinator for testing purposes.
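
As a minimal sketch of the behavior behind bucket 2, assuming a cluster whose
coordinator is already in the metadata (e.g. via
SELECT citus_set_coordinator_host('localhost', 57636);) and using table names
that mirror the at_add_fk test changes further below:

CREATE TABLE referenced_local_table (id int PRIMARY KEY, other_column int);
CREATE TABLE reference_table (id int, referencing_column int);
SELECT create_reference_table('reference_table');

-- Before this change, the ALTER TABLE below errored out because
-- referenced_local_table is a plain postgres table. With the coordinator in
-- the metadata, it succeeds and converts referenced_local_table into a
-- citus local table.
ALTER TABLE reference_table
    ADD FOREIGN KEY (referencing_column) REFERENCES referenced_local_table(id);

-- The converted table now shows up in the Citus metadata.
SELECT partmethod FROM pg_dist_partition
WHERE logicalrelid = 'referenced_local_table'::regclass;
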
Emel Şimşek 2023-06-05 10:37:48 +03:00 committed by GitHub
parent 976ab5a9be
commit 3fda2c3254
173 changed files with 1676 additions and 956 deletions

View File

@ -127,6 +127,9 @@ DEPS = {
"multi_mx_function_table_reference", "multi_mx_function_table_reference",
], ],
), ),
"alter_distributed_table": TestDeps(
"minimal_schedule", ["multi_behavioral_analytics_create_table"]
),
"background_rebalance": TestDeps( "background_rebalance": TestDeps(
None, None,
[ [
@ -144,6 +147,7 @@ DEPS = {
worker_count=6, worker_count=6,
), ),
"function_propagation": TestDeps("minimal_schedule"), "function_propagation": TestDeps("minimal_schedule"),
"grant_on_foreign_server_propagation": TestDeps("minimal_schedule"),
"multi_mx_modifying_xacts": TestDeps(None, ["multi_mx_create_table"]), "multi_mx_modifying_xacts": TestDeps(None, ["multi_mx_create_table"]),
"multi_mx_router_planner": TestDeps(None, ["multi_mx_create_table"]), "multi_mx_router_planner": TestDeps(None, ["multi_mx_create_table"]),
"multi_mx_copy_data": TestDeps(None, ["multi_mx_create_table"]), "multi_mx_copy_data": TestDeps(None, ["multi_mx_create_table"]),

View File

@ -2,6 +2,7 @@ test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers
test: multi_cluster_management test: multi_cluster_management
test: multi_test_catalog_views test: multi_test_catalog_views
test: remove_coordinator_from_metadata
test: columnar_create test: columnar_create
test: columnar_load test: columnar_load
test: columnar_query test: columnar_query

View File

@ -16,11 +16,11 @@ test: add_coordinator
test: citus_local_tables_ent test: citus_local_tables_ent
test: remove_coordinator
# -------- # --------
test: publication test: publication
test: logical_replication test: logical_replication
test: check_cluster_state
test: multi_create_table test: multi_create_table
test: multi_create_table_superuser test: multi_create_table_superuser
test: multi_create_role_dependency test: multi_create_role_dependency

View File

@ -168,10 +168,11 @@ select count(*) from trips t1, cars r1, trips t2, cars r2 where t1.trip_id = t2.
(1 row) (1 row)
DROP SCHEMA adaptive_executor CASCADE; DROP SCHEMA adaptive_executor CASCADE;
NOTICE: drop cascades to 6 other objects NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table ab DETAIL: drop cascades to table ab
drop cascades to table single_hash_repartition_first drop cascades to table single_hash_repartition_first
drop cascades to table single_hash_repartition_second drop cascades to table single_hash_repartition_second
drop cascades to table ref_table drop cascades to table ref_table
drop cascades to table ref_table_361397
drop cascades to table cars drop cascades to table cars
drop cascades to table trips drop cascades to table trips

View File

@ -2,6 +2,13 @@
-- ADD_COORDINATOR -- ADD_COORDINATOR
-- --
-- node trying to add itself without specifying groupid => 0 should error out -- node trying to add itself without specifying groupid => 0 should error out
-- first remove the coordinator to for testing master_add_node for coordinator
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
SELECT master_add_node('localhost', :master_port); SELECT master_add_node('localhost', :master_port);
ERROR: Node cannot add itself as a worker. ERROR: Node cannot add itself as a worker.
HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636); HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636);

View File

@ -528,8 +528,8 @@ SELECT COUNT(DISTINCT colocationid) FROM pg_dist_partition WHERE logicalrelid::r
-- test references -- test references
CREATE TABLE referenced_dist_table (a INT UNIQUE); CREATE TABLE referenced_dist_table (a INT UNIQUE);
CREATE TABLE referenced_ref_table (a INT UNIQUE); CREATE TABLE referenced_ref_table (a INT UNIQUE);
CREATE TABLE table_with_references (a1 INT UNIQUE REFERENCES referenced_dist_table(a), a2 INT REFERENCES referenced_ref_table(a)); CREATE TABLE table_with_references (a1 INT UNIQUE, a2 INT);
CREATE TABLE referencing_dist_table (a INT REFERENCES table_with_references(a1)); CREATE TABLE referencing_dist_table (a INT);
SELECT create_distributed_table('referenced_dist_table', 'a', colocate_with:='none'); SELECT create_distributed_table('referenced_dist_table', 'a', colocate_with:='none');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -554,6 +554,9 @@ SELECT create_distributed_table('referencing_dist_table', 'a', colocate_with:='r
(1 row) (1 row)
ALTER TABLE table_with_references ADD FOREIGN KEY (a1) REFERENCES referenced_dist_table(a);
ALTER TABLE table_with_references ADD FOREIGN KEY (a2) REFERENCES referenced_ref_table(a);
ALTER TABLE referencing_dist_table ADD FOREIGN KEY (a) REFERENCES table_with_references(a1);
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint SELECT conrelid::regclass::text AS "Referencing Table", pg_get_constraintdef(oid, true) AS "Definition" FROM pg_constraint
WHERE (conrelid::regclass::text = 'table_with_references' OR confrelid::regclass::text = 'table_with_references') AND contype = 'f' ORDER BY 1,2; WHERE (conrelid::regclass::text = 'table_with_references' OR confrelid::regclass::text = 'table_with_references') AND contype = 'f' ORDER BY 1,2;
@ -1255,3 +1258,4 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_matviews WHERE matviewna
RESET search_path; RESET search_path;
DROP SCHEMA alter_distributed_table CASCADE; DROP SCHEMA alter_distributed_table CASCADE;
DROP SCHEMA schema_to_test_alter_dist_table CASCADE; DROP SCHEMA schema_to_test_alter_dist_table CASCADE;
DROP USER alter_dist_table_test_user;

View File

@ -802,9 +802,3 @@ select alter_table_set_access_method('view_test_view','columnar');
ERROR: you cannot alter access method of a view ERROR: you cannot alter access method of a view
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
DROP SCHEMA alter_table_set_access_method CASCADE; DROP SCHEMA alter_table_set_access_method CASCADE;
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)

View File

@ -0,0 +1,6 @@
SELECT count(*) >= 1 as coordinator_exists FROM pg_dist_node WHERE groupid = 0 AND isactive;
coordinator_exists
---------------------------------------------------------------------
t
(1 row)

View File

@ -1,7 +1,6 @@
CREATE SCHEMA citus_local_dist_joins; CREATE SCHEMA citus_local_dist_joins;
SET search_path TO citus_local_dist_joins; SET search_path TO citus_local_dist_joins;
SET client_min_messages to ERROR; SET client_min_messages to ERROR;
SELECT master_add_node('localhost', :master_port, groupId => 0) AS coordinator_nodeid \gset
CREATE TABLE citus_local(key int, value text); CREATE TABLE citus_local(key int, value text);
SELECT citus_add_local_table_to_metadata('citus_local'); SELECT citus_add_local_table_to_metadata('citus_local');
citus_add_local_table_to_metadata citus_add_local_table_to_metadata
@ -523,11 +522,5 @@ ERROR: recursive complex joins are only supported when all distributed tables a
RESET citus.local_table_join_policy; RESET citus.local_table_join_policy;
SET client_min_messages to ERROR; SET client_min_messages to ERROR;
DROP TABLE citus_local; DROP TABLE citus_local;
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
\set VERBOSITY terse \set VERBOSITY terse
DROP SCHEMA citus_local_dist_joins CASCADE; DROP SCHEMA citus_local_dist_joins CASCADE;

View File

@ -155,4 +155,4 @@ SELECT master_get_table_ddl_events('test_table');
-- cleanup at exit -- cleanup at exit
DROP SCHEMA table_triggers_schema CASCADE; DROP SCHEMA table_triggers_schema CASCADE;
NOTICE: drop cascades to 8 other objects NOTICE: drop cascades to 9 other objects

View File

@ -64,6 +64,10 @@ SET citus.multi_shard_modify_mode TO sequential;
SELECT citus_update_table_statistics('test_table_statistics_hash'); SELECT citus_update_table_statistics('test_table_statistics_hash');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT 0::bigint, NULL::text, 0::bigint;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint; NOTICE: issuing SELECT 981000 AS shard_id, 'public.test_table_statistics_hash_981000' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, 'public.test_table_statistics_hash_981001' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, 'public.test_table_statistics_hash_981002' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, 'public.test_table_statistics_hash_981003' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 'public.test_table_statistics_hash_981004' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, 'public.test_table_statistics_hash_981005' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, 'public.test_table_statistics_hash_981006' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, 'public.test_table_statistics_hash_981007' AS shard_name, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
@ -73,6 +77,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
citus_update_table_statistics citus_update_table_statistics
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -152,6 +158,10 @@ SET citus.multi_shard_modify_mode TO sequential;
SELECT citus_update_table_statistics('test_table_statistics_append'); SELECT citus_update_table_statistics('test_table_statistics_append');
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT 0::bigint, NULL::text, 0::bigint;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint; NOTICE: issuing SELECT 981008 AS shard_id, 'public.test_table_statistics_append_981008' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, 'public.test_table_statistics_append_981009' AS shard_name, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, NULL::text, 0::bigint;
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
@ -161,6 +171,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
citus_update_table_statistics citus_update_table_statistics
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -1118,7 +1118,7 @@ SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA function_tests CASCADE; DROP SCHEMA function_tests CASCADE;
DROP SCHEMA function_tests2 CASCADE; DROP SCHEMA function_tests2 CASCADE;
-- clear objects -- clear objects
SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary' AND groupid <> 0;
stop_metadata_sync_to_node stop_metadata_sync_to_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1144,7 +1144,7 @@ SELECT 1 FROM run_command_on_workers($$DROP USER functionuser$$);
(2 rows) (2 rows)
-- sync metadata again -- sync metadata again
SELECT start_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary'; SELECT start_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isactive AND noderole = 'primary' AND groupid <> 0;
start_metadata_sync_to_node start_metadata_sync_to_node
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -357,9 +357,10 @@ DEBUG: generating subplan XXX_1 for subquery SELECT tenant_id FROM recursive_dm
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.local_table SET id = 'citus_test'::text FROM (SELECT distributed_table_1.tenant_id, NULL::integer AS dept, NULL::jsonb AS info FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) distributed_table_1) distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) local_table.id) DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_queries.local_table SET id = 'citus_test'::text FROM (SELECT distributed_table_1.tenant_id, NULL::integer AS dept, NULL::jsonb AS info FROM (SELECT intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(tenant_id text)) distributed_table_1) distributed_table WHERE (distributed_table.tenant_id OPERATOR(pg_catalog.=) local_table.id)
RESET client_min_messages; RESET client_min_messages;
DROP SCHEMA recursive_dml_queries CASCADE; DROP SCHEMA recursive_dml_queries CASCADE;
NOTICE: drop cascades to 5 other objects NOTICE: drop cascades to 6 other objects
DETAIL: drop cascades to table distributed_table DETAIL: drop cascades to table distributed_table
drop cascades to table second_distributed_table drop cascades to table second_distributed_table
drop cascades to table reference_table drop cascades to table reference_table
drop cascades to table reference_table_2370008
drop cascades to table local_table drop cascades to table local_table
drop cascades to view tenant_ids drop cascades to view tenant_ids

View File

@ -489,8 +489,9 @@ RESET citus.enable_fast_path_router_planner;
RESET client_min_messages; RESET client_min_messages;
RESET citus.log_remote_commands; RESET citus.log_remote_commands;
DROP SCHEMA fast_path_router_modify CASCADE; DROP SCHEMA fast_path_router_modify CASCADE;
NOTICE: drop cascades to 4 other objects NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table modify_fast_path DETAIL: drop cascades to table modify_fast_path
drop cascades to table modify_fast_path_replication_2 drop cascades to table modify_fast_path_replication_2
drop cascades to table modify_fast_path_reference drop cascades to table modify_fast_path_reference
drop cascades to table modify_fast_path_reference_1840008
drop cascades to function modify_fast_path_plpsql(integer,integer) drop cascades to function modify_fast_path_plpsql(integer,integer)

View File

@ -616,12 +616,15 @@ BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table" DEBUG: rewriting table "reference_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
DEBUG: rewriting table "reference_table_2380001"
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1); CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table" DEBUG: rewriting table "transitive_reference_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
DEBUG: rewriting table "transitive_reference_table_2380000"
DEBUG: validating foreign key constraint "fkey_xxxxxxx"
CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1); CREATE INDEX fkey_test_index_1 ON on_update_fkey_table(value_1);
ROLLBACK; ROLLBACK;
-- case 4.6: DDL to reference table followed by a DDL to dist table, both touching fkey columns -- case 4.6: DDL to reference table followed by a DDL to dist table, both touching fkey columns
@ -629,6 +632,7 @@ BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table" DEBUG: rewriting table "reference_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
DEBUG: rewriting table "reference_table_2380001"
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table" DEBUG: rewriting table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
@ -637,6 +641,8 @@ BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table" DEBUG: rewriting table "transitive_reference_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
DEBUG: rewriting table "transitive_reference_table_2380000"
DEBUG: validating foreign key constraint "fkey_xxxxxxx"
ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint; ALTER TABLE on_update_fkey_table ALTER COLUMN value_1 SET DATA TYPE smallint;
DEBUG: rewriting table "on_update_fkey_table" DEBUG: rewriting table "on_update_fkey_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
@ -672,12 +678,15 @@ BEGIN;
ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "reference_table" DEBUG: rewriting table "reference_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
DEBUG: rewriting table "reference_table_2380001"
TRUNCATE on_update_fkey_table; TRUNCATE on_update_fkey_table;
ROLLBACK; ROLLBACK;
BEGIN; BEGIN;
ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint; ALTER TABLE transitive_reference_table ALTER COLUMN id SET DATA TYPE smallint;
DEBUG: rewriting table "transitive_reference_table" DEBUG: rewriting table "transitive_reference_table"
DEBUG: validating foreign key constraint "fkey" DEBUG: validating foreign key constraint "fkey"
DEBUG: rewriting table "transitive_reference_table_2380000"
DEBUG: validating foreign key constraint "fkey_xxxxxxx"
TRUNCATE on_update_fkey_table; TRUNCATE on_update_fkey_table;
ROLLBACK; ROLLBACK;
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -834,6 +843,7 @@ BEGIN;
TRUNCATE transitive_reference_table CASCADE; TRUNCATE transitive_reference_table CASCADE;
NOTICE: truncate cascades to table "reference_table" NOTICE: truncate cascades to table "reference_table"
NOTICE: truncate cascades to table "on_update_fkey_table" NOTICE: truncate cascades to table "on_update_fkey_table"
NOTICE: truncate cascades to table "reference_table_xxxxx"
ROLLBACK; ROLLBACK;
-- case 4.7: SELECT to a dist table is followed by a DROP -- case 4.7: SELECT to a dist table is followed by a DROP
-- DROP following SELECT is important as we error out after -- DROP following SELECT is important as we error out after
@ -1101,6 +1111,12 @@ ROLLBACK;
-- the fails since we're trying to switch sequential mode after -- the fails since we're trying to switch sequential mode after
-- already executed a parallel query -- already executed a parallel query
BEGIN; BEGIN;
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
create_reference_table create_reference_table
@ -1129,6 +1145,12 @@ ROLLBACK;
-- same test with the above, but this time using -- same test with the above, but this time using
-- sequential mode, succeeds -- sequential mode, succeeds
BEGIN; BEGIN;
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
CREATE TABLE test_table_1(id int PRIMARY KEY); CREATE TABLE test_table_1(id int PRIMARY KEY);
SELECT create_reference_table('test_table_1'); SELECT create_reference_table('test_table_1');
@ -1499,6 +1521,6 @@ ROLLBACK;
RESET client_min_messages; RESET client_min_messages;
\set VERBOSITY terse \set VERBOSITY terse
DROP SCHEMA test_fkey_to_ref_in_tx CASCADE; DROP SCHEMA test_fkey_to_ref_in_tx CASCADE;
NOTICE: drop cascades to 5 other objects NOTICE: drop cascades to 7 other objects
\set VERBOSITY default \set VERBOSITY default
SET search_path TO public; SET search_path TO public;

View File

@ -864,13 +864,6 @@ BEGIN;
(0 rows) (0 rows)
CREATE TABLE citus_local_table_to_test_func(l1 int DEFAULT func_in_transaction_for_local_table()); CREATE TABLE citus_local_table_to_test_func(l1 int DEFAULT func_in_transaction_for_local_table());
SET LOCAL client_min_messages TO WARNING;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT citus_add_local_table_to_metadata('citus_local_table_to_test_func'); SELECT citus_add_local_table_to_metadata('citus_local_table_to_test_func');
citus_add_local_table_to_metadata citus_add_local_table_to_metadata
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -5,6 +5,12 @@
CREATE SCHEMA "grant on server"; CREATE SCHEMA "grant on server";
SET search_path TO "grant on server"; SET search_path TO "grant on server";
-- remove one of the worker nodes to test adding a new node later -- remove one of the worker nodes to test adding a new node later
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -173,9 +179,3 @@ SET client_min_messages TO ERROR;
DROP SERVER "Foreign Server" CASCADE; DROP SERVER "Foreign Server" CASCADE;
DROP SCHEMA "grant on server" CASCADE; DROP SCHEMA "grant on server" CASCADE;
DROP ROLE role_test_servers, role_test_servers_2, ownerrole; DROP ROLE role_test_servers, role_test_servers_2, ownerrole;
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)

View File

@ -1112,4 +1112,4 @@ RETURNING *;
ROLLBACK; ROLLBACK;
\set VERBOSITY terse \set VERBOSITY terse
DROP SCHEMA insert_select_into_local_table CASCADE; DROP SCHEMA insert_select_into_local_table CASCADE;
NOTICE: drop cascades to 12 other objects NOTICE: drop cascades to 13 other objects

View File

@ -1092,14 +1092,14 @@ EXPLAIN (costs off) INSERT INTO test(y, x) SELECT a.x, b.y FROM test a JOIN test
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(11 rows) (11 rows)
SET client_min_messages TO DEBUG1; SET client_min_messages TO DEBUG1;
@ -1121,14 +1121,14 @@ EXPLAIN (costs off) INSERT INTO test SELECT a.* FROM test a JOIN test b USING (y
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(11 rows) (11 rows)
SET client_min_messages TO DEBUG1; SET client_min_messages TO DEBUG1;

View File

@ -1092,14 +1092,14 @@ EXPLAIN (costs off) INSERT INTO test(y, x) SELECT a.x, b.y FROM test a JOIN test
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(11 rows) (11 rows)
SET client_min_messages TO DEBUG1; SET client_min_messages TO DEBUG1;
@ -1121,14 +1121,14 @@ EXPLAIN (costs off) INSERT INTO test SELECT a.* FROM test a JOIN test b USING (y
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(11 rows) (11 rows)
SET client_min_messages TO DEBUG1; SET client_min_messages TO DEBUG1;

View File

@ -2,13 +2,6 @@ CREATE SCHEMA insert_select_single_shard_table;
SET search_path TO insert_select_single_shard_table; SET search_path TO insert_select_single_shard_table;
SET citus.next_shard_id TO 1820000; SET citus.next_shard_id TO 1820000;
SET citus.shard_count TO 32; SET citus.shard_count TO 32;
SET client_min_messages TO WARNING;
SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
SET client_min_messages TO NOTICE; SET client_min_messages TO NOTICE;
CREATE TABLE nullkey_c1_t1(a int, b int); CREATE TABLE nullkey_c1_t1(a int, b int);
CREATE TABLE nullkey_c1_t2(a int, b int); CREATE TABLE nullkey_c1_t2(a int, b int);
@ -823,9 +816,3 @@ DEBUG: Creating router plan
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
DROP SCHEMA insert_select_single_shard_table CASCADE; DROP SCHEMA insert_select_single_shard_table CASCADE;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)

View File

@ -99,6 +99,7 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -386,6 +387,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
key | key | value key | key | value
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -99,6 +99,7 @@ FROM
DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text])) DEBUG: generating subplan XXX_1 for CTE some_values_1: SELECT key, random() AS random FROM intermediate_result_pruning.table_1 WHERE (value OPERATOR(pg_catalog.=) ANY (ARRAY['3'::text, '4'::text]))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key)) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.key, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(key integer, random double precision)) some_values_1 JOIN intermediate_result_pruning.ref_table USING (key))
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -386,6 +387,7 @@ DEBUG: Subplan XXX_1 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx DEBUG: Subplan XXX_2 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
key | key | value key | key | value
--------------------------------------------------------------------- ---------------------------------------------------------------------

View File

@ -672,3 +672,5 @@ COMMIT;
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
DROP SCHEMA other_schema CASCADE; DROP SCHEMA other_schema CASCADE;
DROP SCHEMA intermediate_results CASCADE; DROP SCHEMA intermediate_results CASCADE;
DROP OWNED BY some_other_user;
DROP USER some_other_user;

View File

@ -463,10 +463,11 @@ SELECT * FROM abcd first join abcd second USING(b) join abcd third on first.b=th
END; END;
DROP SCHEMA join_schema CASCADE; DROP SCHEMA join_schema CASCADE;
NOTICE: drop cascades to 6 other objects NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table abcd DETAIL: drop cascades to table abcd
drop cascades to table distributed_table drop cascades to table distributed_table
drop cascades to table reference_table drop cascades to table reference_table
drop cascades to table reference_table_9000004
drop cascades to table test_table_1 drop cascades to table test_table_1
drop cascades to table test_table_2 drop cascades to table test_table_2
drop cascades to view abcd_view drop cascades to view abcd_view

View File

@ -16,7 +16,7 @@ SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10;
ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB) ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB)
DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place.
HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable.
SET citus.max_intermediate_result_size TO 9; SET citus.max_intermediate_result_size TO 17;
WITH cte AS MATERIALIZED WITH cte AS MATERIALIZED
( (
SELECT SELECT

View File

@ -86,7 +86,13 @@ CREATE FOREIGN TABLE foreign_table (
CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM postgres_table; CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM postgres_table;
CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM distributed_table; CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM distributed_table;
SET client_min_messages TO DEBUG1; SET client_min_messages TO DEBUG1;
-- the user doesn't allow local / distributed table joinn -- the user doesn't allow local / distributed table join
SELECT master_remove_node('localhost', :master_port); -- https://github.com/citusdata/citus/issues/6958
master_remove_node
---------------------------------------------------------------------
(1 row)
SET citus.local_table_join_policy TO 'never'; SET citus.local_table_join_policy TO 'never';
SELECT count(*) FROM postgres_table JOIN distributed_table USING(key); SELECT count(*) FROM postgres_table JOIN distributed_table USING(key);
ERROR: direct joins between distributed and local tables are not supported ERROR: direct joins between distributed and local tables are not supported
@ -94,6 +100,12 @@ HINT: Use CTE's or subqueries to select from local tables and use them in joins
SELECT count(*) FROM postgres_table JOIN reference_table USING(key); SELECT count(*) FROM postgres_table JOIN reference_table USING(key);
ERROR: direct joins between distributed and local tables are not supported ERROR: direct joins between distributed and local tables are not supported
HINT: Use CTE's or subqueries to select from local tables and use them in joins HINT: Use CTE's or subqueries to select from local tables and use them in joins
SELECT citus_set_coordinator_host('localhost'); -- https://github.com/citusdata/citus/issues/6958
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
-- the user prefers local table recursively planned -- the user prefers local table recursively planned
SET citus.local_table_join_policy TO 'prefer-local'; SET citus.local_table_join_policy TO 'prefer-local';
SELECT count(*) FROM postgres_table JOIN distributed_table USING(key); SELECT count(*) FROM postgres_table JOIN distributed_table USING(key);
@ -1586,6 +1598,12 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 AS res FROM
(1 row) (1 row)
ROLLBACK; ROLLBACK;
SELECT master_remove_node('localhost', :master_port); -- https://github.com/citusdata/citus/issues/6958
master_remove_node
---------------------------------------------------------------------
(1 row)
BEGIN; BEGIN;
SELECT create_reference_table('table1'); SELECT create_reference_table('table1');
NOTICE: Copying data from local table... NOTICE: Copying data from local table...
@ -1632,7 +1650,13 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 AS res FROM
(1 row) (1 row)
ROLLBACK; ROLLBACK;
SELECT citus_set_coordinator_host('localhost'); -- https://github.com/citusdata/citus/issues/6958
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
RESET client_min_messages; RESET client_min_messages;
\set VERBOSITY terse \set VERBOSITY terse
DROP SCHEMA local_table_join CASCADE; DROP SCHEMA local_table_join CASCADE;
NOTICE: drop cascades to 22 other objects NOTICE: drop cascades to 23 other objects

View File

@ -14,13 +14,6 @@ SELECT create_distributed_table('dist', 'id');
(1 row) (1 row)
INSERT INTO dist SELECT generate_series(1, 100); INSERT INTO dist SELECT generate_series(1, 100);
SELECT 1 from citus_add_node('localhost', :master_port, groupId := 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
-- Create a publiction and subscription (including replication slot) manually. -- Create a publiction and subscription (including replication slot) manually.
-- This allows us to test the cleanup logic at the start of the shard move. -- This allows us to test the cleanup logic at the start of the shard move.
\c - - - :worker_1_port \c - - - :worker_1_port
@ -97,12 +90,6 @@ select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localho
(1 row) (1 row)
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
-- the subscription is still there, as there is no cleanup record for it -- the subscription is still there, as there is no cleanup record for it
-- we have created it manually -- we have created it manually
SELECT count(*) from pg_subscription; SELECT count(*) from pg_subscription;

View File

@ -20,13 +20,6 @@ SET citus.next_shard_id TO 4000000;
SET citus.explain_all_tasks TO true; SET citus.explain_all_tasks TO true;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
SET citus.max_adaptive_executor_pool_size TO 1; SET citus.max_adaptive_executor_pool_size TO 1;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE source CREATE TABLE source
( (
order_id INT, order_id INT,
@ -3477,9 +3470,3 @@ drop cascades to table dist_colocated
drop cascades to table dist_target drop cascades to table dist_target
drop cascades to table dist_source drop cascades to table dist_source
drop cascades to view show_tables drop cascades to view show_tables
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)

View File

@ -862,14 +862,6 @@ DROP TABLE AT_AddConstNoName.dist_partitioned_table;
-- Test with Citus Local Tables -- Test with Citus Local Tables
-- Test "ADD PRIMARY KEY" -- Test "ADD PRIMARY KEY"
\c - - :master_host :master_port \c - - :master_host :master_port
SET client_min_messages to ERROR;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
CREATE TABLE AT_AddConstNoName.citus_local_table(id int, other_column int); CREATE TABLE AT_AddConstNoName.citus_local_table(id int, other_column int);
SELECT citus_add_local_table_to_metadata('AT_AddConstNoName.citus_local_table'); SELECT citus_add_local_table_to_metadata('AT_AddConstNoName.citus_local_table');
citus_add_local_table_to_metadata citus_add_local_table_to_metadata
@ -1175,12 +1167,6 @@ SELECT con.conname
(0 rows) (0 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- Test with unusual table and column names -- Test with unusual table and column names
CREATE TABLE AT_AddConstNoName."2nd table" ( "2nd id" INTEGER, "3rd id" INTEGER); CREATE TABLE AT_AddConstNoName."2nd table" ( "2nd id" INTEGER, "3rd id" INTEGER);
SELECT create_distributed_table('AT_AddConstNoName."2nd table"','2nd id'); SELECT create_distributed_table('AT_AddConstNoName."2nd table"','2nd id');
@ -1315,7 +1301,7 @@ NOTICE: drop cascades to 7 other objects
DETAIL: drop cascades to table at_addconstnoname.tbl DETAIL: drop cascades to table at_addconstnoname.tbl
drop cascades to table at_addconstnoname.products_ref_2 drop cascades to table at_addconstnoname.products_ref_2
drop cascades to table at_addconstnoname.products_ref_3 drop cascades to table at_addconstnoname.products_ref_3
drop cascades to table at_addconstnoname.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon
drop cascades to table at_addconstnoname.products_ref_3_5410009 drop cascades to table at_addconstnoname.products_ref_3_5410009
drop cascades to table at_addconstnoname.verylonglonglonglonglonglonglonglonglonglonglonglonglonglonglon
drop cascades to table at_addconstnoname.citus_local_partitioned_table drop cascades to table at_addconstnoname.citus_local_partitioned_table
drop cascades to table at_addconstnoname."2nd table" drop cascades to table at_addconstnoname."2nd table"

View File

@ -120,7 +120,7 @@ ERROR: cannot create foreign key constraint since relations are not colocated o
DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table DETAIL: A distributed table can only have foreign keys if it is referencing another colocated hash distributed table or a reference table
DROP TABLE referencing_table; DROP TABLE referencing_table;
DROP TABLE referenced_table; DROP TABLE referenced_table;
-- test foreign constraint creation is not supported when one of the tables is not a citus table -- test foreign constraint creation is supported when coordinator is in metadata
CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int); CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int);
CREATE TABLE reference_table(id int, referencing_column int); CREATE TABLE reference_table(id int, referencing_column int);
SELECT create_reference_table('reference_table'); SELECT create_reference_table('reference_table');
@ -130,11 +130,12 @@ SELECT create_reference_table('reference_table');
(1 row) (1 row)
ALTER TABLE reference_table ADD FOREIGN KEY (referencing_column) REFERENCES referenced_local_table(id); ALTER TABLE reference_table ADD FOREIGN KEY (referencing_column) REFERENCES referenced_local_table(id);
ERROR: referenced table "referenced_local_table" must be a distributed table or a reference table
DETAIL: To enforce foreign keys, the referencing and referenced rows need to be stored on the same node.
HINT: You could use SELECT create_reference_table('referenced_local_table') to replicate the referenced table to all nodes or consider dropping the foreign key
DROP TABLE referenced_local_table; DROP TABLE referenced_local_table;
DROP TABLE reference_table; ERROR: cannot drop table referenced_local_table because other objects depend on it
DETAIL: constraint reference_table_referencing_column_fkey on table reference_table depends on table referenced_local_table
HINT: Use DROP ... CASCADE to drop the dependent objects too.
DROP TABLE reference_table CASCADE;
NOTICE: removing table at_add_fk.referenced_local_table from metadata as it is not connected to any reference tables via foreign keys
-- test foreign constraint with correct conditions -- test foreign constraint with correct conditions
CREATE TABLE referenced_table(id int PRIMARY KEY, test_column int); CREATE TABLE referenced_table(id int PRIMARY KEY, test_column int);
CREATE TABLE referencing_table(id int, ref_id int); CREATE TABLE referencing_table(id int, ref_id int);
@ -170,8 +171,8 @@ SELECT con.conname
conname conname
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_fkey referencing_table_ref_id_fkey
referencing_table_ref_id_fkey_1770033 referencing_table_ref_id_fkey_1770034
referencing_table_ref_id_fkey_1770035 referencing_table_ref_id_fkey_1770036
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -198,8 +199,8 @@ SELECT con.conname
conname conname
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_fkey referencing_table_ref_id_fkey
referencing_table_ref_id_fkey_1770033 referencing_table_ref_id_fkey_1770034
referencing_table_ref_id_fkey_1770035 referencing_table_ref_id_fkey_1770036
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -244,8 +245,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype conname | confupdtype | confdeltype | confmatchtype
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_fkey | a | c | s referencing_table_ref_id_fkey | a | c | s
referencing_table_ref_id_fkey_1770041 | a | c | s referencing_table_ref_id_fkey_1770042 | a | c | s
referencing_table_ref_id_fkey_1770043 | a | c | s referencing_table_ref_id_fkey_1770044 | a | c | s
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -272,8 +273,8 @@ SELECT con.conname, con.convalidated
conname | convalidated conname | convalidated
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_fkey | f referencing_table_ref_id_fkey | f
referencing_table_ref_id_fkey_1770041 | f referencing_table_ref_id_fkey_1770042 | f
referencing_table_ref_id_fkey_1770043 | f referencing_table_ref_id_fkey_1770044 | f
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -300,8 +301,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype conname | confupdtype | confdeltype | confmatchtype
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_fkey | a | a | s referencing_table_ref_id_fkey | a | a | s
referencing_table_ref_id_fkey_1770041 | a | a | s referencing_table_ref_id_fkey_1770042 | a | a | s
referencing_table_ref_id_fkey_1770043 | a | a | s referencing_table_ref_id_fkey_1770044 | a | a | s
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -328,8 +329,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype conname | confupdtype | confdeltype | confmatchtype
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_fkey | a | r | s referencing_table_ref_id_fkey | a | r | s
referencing_table_ref_id_fkey_1770041 | a | r | s referencing_table_ref_id_fkey_1770042 | a | r | s
referencing_table_ref_id_fkey_1770043 | a | r | s referencing_table_ref_id_fkey_1770044 | a | r | s
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -356,8 +357,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype conname | confupdtype | confdeltype | confmatchtype
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | a | a | s referencing_table_ref_id_id_fkey | a | a | s
referencing_table_ref_id_id_fkey_1770041 | a | a | s referencing_table_ref_id_id_fkey_1770042 | a | a | s
referencing_table_ref_id_id_fkey_1770043 | a | a | s referencing_table_ref_id_id_fkey_1770044 | a | a | s
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -384,8 +385,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype conname | confupdtype | confdeltype | confmatchtype
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | r | a | s referencing_table_ref_id_id_fkey | r | a | s
referencing_table_ref_id_id_fkey_1770041 | r | a | s referencing_table_ref_id_id_fkey_1770042 | r | a | s
referencing_table_ref_id_id_fkey_1770043 | r | a | s referencing_table_ref_id_id_fkey_1770044 | r | a | s
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -412,8 +413,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype conname | confupdtype | confdeltype | confmatchtype
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | a | a | s referencing_table_ref_id_id_fkey | a | a | s
referencing_table_ref_id_id_fkey_1770041 | a | a | s referencing_table_ref_id_id_fkey_1770042 | a | a | s
referencing_table_ref_id_id_fkey_1770043 | a | a | s referencing_table_ref_id_id_fkey_1770044 | a | a | s
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -440,8 +441,8 @@ SELECT con.conname, con.confupdtype, con.confdeltype, con.confmatchtype
conname | confupdtype | confdeltype | confmatchtype conname | confupdtype | confdeltype | confmatchtype
--------------------------------------------------------------------- ---------------------------------------------------------------------
referencing_table_ref_id_id_fkey | a | a | f referencing_table_ref_id_id_fkey | a | a | f
referencing_table_ref_id_id_fkey_1770041 | a | a | f referencing_table_ref_id_id_fkey_1770042 | a | a | f
referencing_table_ref_id_id_fkey_1770043 | a | a | f referencing_table_ref_id_id_fkey_1770044 | a | a | f
(3 rows) (3 rows)
\c - - :master_host :master_port \c - - :master_host :master_port
@ -524,13 +525,6 @@ BEGIN;
DROP TABLE dist_table CASCADE; DROP TABLE dist_table CASCADE;
DROP TABLE reference_table CASCADE; DROP TABLE reference_table CASCADE;
-- test ADD FOREIGN KEY from citus local to reference table -- test ADD FOREIGN KEY from citus local to reference table
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE citus_local_table(l1 int); CREATE TABLE citus_local_table(l1 int);
SELECT citus_add_local_table_to_metadata('citus_local_table'); SELECT citus_add_local_table_to_metadata('citus_local_table');
citus_add_local_table_to_metadata citus_add_local_table_to_metadata
@ -557,17 +551,12 @@ ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1)
ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1) ON DELETE NO ACTION; ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1) ON DELETE NO ACTION;
ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1) ON DELETE RESTRICT; ALTER TABLE citus_local_table ADD FOREIGN KEY(l1) REFERENCES reference_table(r1) ON DELETE RESTRICT;
DROP TABLE citus_local_table CASCADE; DROP TABLE citus_local_table CASCADE;
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET SEARCH_PATH; RESET SEARCH_PATH;
RESET client_min_messages; RESET client_min_messages;
DROP SCHEMA at_add_fk CASCADE; DROP SCHEMA at_add_fk CASCADE;
NOTICE: drop cascades to 4 other objects NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table at_add_fk.referenced_table DETAIL: drop cascades to table at_add_fk.referenced_local_table
drop cascades to table at_add_fk.referenced_table
drop cascades to table at_add_fk.referencing_table drop cascades to table at_add_fk.referencing_table
drop cascades to table at_add_fk.reference_table drop cascades to table at_add_fk.reference_table
drop cascades to table at_add_fk.reference_table_1770051 drop cascades to table at_add_fk.reference_table_1770052
@ -547,7 +547,8 @@ WHERE isactive = 't' AND noderole='primary';
--------------------------------------------------------------------- ---------------------------------------------------------------------
t t
t t
(2 rows) t
(3 rows)
CREATE TABLE distributed(id int, data text); CREATE TABLE distributed(id int, data text);
SELECT create_distributed_table('distributed', 'id'); SELECT create_distributed_table('distributed', 'id');
@ -632,11 +633,16 @@ SELECT citus_check_connection_to_node('localhost', :worker_2_port);
SELECT * FROM citus_check_cluster_node_health() ORDER BY 1,2,3,4; SELECT * FROM citus_check_cluster_node_health() ORDER BY 1,2,3,4;
from_nodename | from_nodeport | to_nodename | to_nodeport | result from_nodename | from_nodeport | to_nodename | to_nodeport | result
--------------------------------------------------------------------- ---------------------------------------------------------------------
localhost | 57636 | localhost | 57636 | t
localhost | 57636 | localhost | 57637 | t
localhost | 57636 | localhost | 57638 | t
localhost | 57637 | localhost | 57636 | t
localhost | 57637 | localhost | 57637 | t localhost | 57637 | localhost | 57637 | t
localhost | 57637 | localhost | 57638 | t localhost | 57637 | localhost | 57638 | t
localhost | 57638 | localhost | 57636 | t
localhost | 57638 | localhost | 57637 | t localhost | 57638 | localhost | 57637 | t
localhost | 57638 | localhost | 57638 | t localhost | 57638 | localhost | 57638 | t
(4 rows) (9 rows)
-- test cluster connectivity when we have broken nodes -- test cluster connectivity when we have broken nodes
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
@ -648,23 +654,32 @@ INSERT INTO pg_dist_node VALUES
SELECT * FROM citus_check_cluster_node_health() ORDER BY 5,1,2,3,4; SELECT * FROM citus_check_cluster_node_health() ORDER BY 5,1,2,3,4;
from_nodename | from_nodeport | to_nodename | to_nodeport | result from_nodename | from_nodeport | to_nodename | to_nodeport | result
--------------------------------------------------------------------- ---------------------------------------------------------------------
localhost | 57636 | localhost | 123456789 | f
localhost | 57636 | www.citusdata.com | 5432 | f
localhost | 57637 | localhost | 123456789 | f localhost | 57637 | localhost | 123456789 | f
localhost | 57637 | www.citusdata.com | 5432 | f localhost | 57637 | www.citusdata.com | 5432 | f
localhost | 57638 | localhost | 123456789 | f localhost | 57638 | localhost | 123456789 | f
localhost | 57638 | www.citusdata.com | 5432 | f localhost | 57638 | www.citusdata.com | 5432 | f
localhost | 57636 | localhost | 57636 | t
localhost | 57636 | localhost | 57637 | t
localhost | 57636 | localhost | 57638 | t
localhost | 57637 | localhost | 57636 | t
localhost | 57637 | localhost | 57637 | t localhost | 57637 | localhost | 57637 | t
localhost | 57637 | localhost | 57638 | t localhost | 57637 | localhost | 57638 | t
localhost | 57638 | localhost | 57636 | t
localhost | 57638 | localhost | 57637 | t localhost | 57638 | localhost | 57637 | t
localhost | 57638 | localhost | 57638 | t localhost | 57638 | localhost | 57638 | t
localhost | 123456789 | localhost | 57636 |
localhost | 123456789 | localhost | 57637 | localhost | 123456789 | localhost | 57637 |
localhost | 123456789 | localhost | 57638 | localhost | 123456789 | localhost | 57638 |
localhost | 123456789 | localhost | 123456789 | localhost | 123456789 | localhost | 123456789 |
localhost | 123456789 | www.citusdata.com | 5432 | localhost | 123456789 | www.citusdata.com | 5432 |
www.citusdata.com | 5432 | localhost | 57636 |
www.citusdata.com | 5432 | localhost | 57637 | www.citusdata.com | 5432 | localhost | 57637 |
www.citusdata.com | 5432 | localhost | 57638 | www.citusdata.com | 5432 | localhost | 57638 |
www.citusdata.com | 5432 | localhost | 123456789 | www.citusdata.com | 5432 | localhost | 123456789 |
www.citusdata.com | 5432 | www.citusdata.com | 5432 | www.citusdata.com | 5432 | www.citusdata.com | 5432 |
(16 rows) (25 rows)
ROLLBACK; ROLLBACK;
RESET citus.node_connection_timeout; RESET citus.node_connection_timeout;
@ -681,6 +681,12 @@ SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node;
(3 rows) (3 rows)
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -791,13 +797,13 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g
SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary');
master_add_inactive_node master_add_inactive_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
22 23
(1 row) (1 row)
SELECT master_activate_node('localhost', 9999); SELECT master_activate_node('localhost', 9999);
master_activate_node master_activate_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
22 23
(1 row) (1 row)
SELECT citus_disable_node('localhost', 9999); SELECT citus_disable_node('localhost', 9999);
@ -831,17 +837,17 @@ CONTEXT: PL/pgSQL function citus_internal.pg_dist_node_trigger_func() line XX a
INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster) INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster)
VALUES ('localhost', 5000, 1000, 'primary', 'olap'); VALUES ('localhost', 5000, 1000, 'primary', 'olap');
ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster" ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster"
DETAIL: Failing row contains (24, 1000, localhost, 5000, default, f, t, primary, olap, f, t). DETAIL: Failing row contains (25, 1000, localhost, 5000, default, f, t, primary, olap, f, t).
UPDATE pg_dist_node SET nodecluster = 'olap' UPDATE pg_dist_node SET nodecluster = 'olap'
WHERE nodeport = :worker_1_port; WHERE nodeport = :worker_1_port;
ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster" ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster"
DETAIL: Failing row contains (16, 14, localhost, 57637, default, f, t, primary, olap, f, t). DETAIL: Failing row contains (17, 14, localhost, 57637, default, f, t, primary, olap, f, t).
-- check that you /can/ add a secondary node to a non-default cluster -- check that you /can/ add a secondary node to a non-default cluster
SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap');
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
25 26
(1 row) (1 row)
-- check that super-long cluster names are truncated -- check that super-long cluster names are truncated
@ -854,13 +860,13 @@ SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole =
); );
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
26 27
(1 row) (1 row)
SELECT * FROM pg_dist_node WHERE nodeport=8887; SELECT * FROM pg_dist_node WHERE nodeport=8887;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
--------------------------------------------------------------------- ---------------------------------------------------------------------
26 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t 27 | 14 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. | f | t
(1 row) (1 row)
-- don't remove the secondary and unavailable nodes, check that no commands are sent to -- don't remove the secondary and unavailable nodes, check that no commands are sent to
@ -869,13 +875,13 @@ SELECT * FROM pg_dist_node WHERE nodeport=8887;
SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port);
master_add_secondary_node master_add_secondary_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
27 28
(1 row) (1 row)
SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port);
master_add_secondary_node master_add_secondary_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
28 29
(1 row) (1 row)
SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000);
@ -883,7 +889,7 @@ ERROR: node at "localhost:xxxxx" does not exist
SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
master_add_secondary_node master_add_secondary_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
29 30
(1 row) (1 row)
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=9992 \gset SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=9992 \gset
@ -941,7 +947,7 @@ SELECT master_update_node(:worker_1_node, 'somehost', 9000);
SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
--------------------------------------------------------------------- ---------------------------------------------------------------------
16 | 14 | somehost | 9000 | default | f | t | primary | default | f | t 17 | 14 | somehost | 9000 | default | f | t | primary | default | f | t
(1 row) (1 row)
-- cleanup -- cleanup
@ -954,7 +960,7 @@ SELECT master_update_node(:worker_1_node, 'localhost', :worker_1_port);
SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node; SELECT * FROM pg_dist_node WHERE nodeid = :worker_1_node;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
--------------------------------------------------------------------- ---------------------------------------------------------------------
16 | 14 | localhost | 57637 | default | f | t | primary | default | f | t 17 | 14 | localhost | 57637 | default | f | t | primary | default | f | t
(1 row) (1 row)
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
@ -963,7 +969,8 @@ SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE i
--------------------------------------------------------------------- ---------------------------------------------------------------------
(2 rows)
(3 rows)
RESET client_min_messages; RESET client_min_messages;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
@ -1044,9 +1051,10 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
nodeport | count nodeport | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
57636 | 1
57637 | 1 57637 | 1
57638 | 1 57638 | 1
(2 rows) (3 rows)
-- cleanup for next test -- cleanup for next test
DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated; DROP TABLE test_dist, test_ref, test_dist_colocated, test_dist_non_colocated, test_dist_colocated_with_non_colocated;
@ -1088,9 +1096,10 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
nodeport | count nodeport | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
57636 | 1
57637 | 1 57637 | 1
57638 | 1 57638 | 1
(2 rows) (3 rows)
SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true);
master_set_node_property master_set_node_property
@ -1114,9 +1123,10 @@ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport; WHERE logicalrelid = 'test_ref'::regclass GROUP BY nodeport ORDER BY nodeport;
nodeport | count nodeport | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
57636 | 1
57637 | 1 57637 | 1
57638 | 1 57638 | 1
(2 rows) (3 rows)
SELECT create_distributed_table('test_dist_colocated', 'x'); SELECT create_distributed_table('test_dist_colocated', 'x');
create_distributed_table create_distributed_table
@ -825,11 +825,13 @@ ORDER BY
table1_group_default | 1300070 | t | 57638 | -715827883 | 715827881 table1_group_default | 1300070 | t | 57638 | -715827883 | 715827881
table1_group_default | 1300071 | t | 57637 | 715827882 | 2147483647 table1_group_default | 1300071 | t | 57637 | 715827882 | 2147483647
table1_group_default | 1300071 | t | 57638 | 715827882 | 2147483647 table1_group_default | 1300071 | t | 57638 | 715827882 | 2147483647
table1_groupf | 1300080 | t | 57636 | |
table1_groupf | 1300080 | t | 57637 | | table1_groupf | 1300080 | t | 57637 | |
table1_groupf | 1300080 | t | 57638 | | table1_groupf | 1300080 | t | 57638 | |
table2_groupf | 1300081 | t | 57636 | |
table2_groupf | 1300081 | t | 57637 | | table2_groupf | 1300081 | t | 57637 | |
table2_groupf | 1300081 | t | 57638 | | table2_groupf | 1300081 | t | 57638 | |
(92 rows) (94 rows)
-- reset colocation ids to test update_distributed_table_colocation -- reset colocation ids to test update_distributed_table_colocation
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1;
@ -756,9 +756,10 @@ SELECT shardid, shardstate, nodename, nodeport
WHERE logicalrelid = 'numbers_reference'::regclass order by placementid; WHERE logicalrelid = 'numbers_reference'::regclass order by placementid;
shardid | shardstate | nodename | nodeport shardid | shardstate | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
560165 | 1 | localhost | 57636
560165 | 1 | localhost | 57637 560165 | 1 | localhost | 57637
560165 | 1 | localhost | 57638 560165 | 1 | localhost | 57638
(2 rows) (3 rows)
-- try to insert into numbers_hash_other. copy should fail and rollback -- try to insert into numbers_hash_other. copy should fail and rollback
-- since it can not insert into either copies of a shard. shards are expected to -- since it can not insert into either copies of a shard. shards are expected to
@ -642,13 +642,15 @@ DROP TABLE tt1;
DROP TABLE tt2; DROP TABLE tt2;
DROP TABLE alter_replica_table; DROP TABLE alter_replica_table;
DROP SCHEMA sc CASCADE; DROP SCHEMA sc CASCADE;
NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table sc.ref DETAIL: drop cascades to table sc.ref
drop cascades to table sc.ref_360102
drop cascades to table sc.hash drop cascades to table sc.hash
DROP SCHEMA sc2 CASCADE; DROP SCHEMA sc2 CASCADE;
NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table sc2.hash DETAIL: drop cascades to table sc2.hash
drop cascades to table sc2.ref drop cascades to table sc2.ref
drop cascades to table sc2.ref_360111
DROP SCHEMA sc3 CASCADE; DROP SCHEMA sc3 CASCADE;
NOTICE: drop cascades to table sc3.alter_replica_table NOTICE: drop cascades to table sc3.alter_replica_table
DROP SCHEMA sc4 CASCADE; DROP SCHEMA sc4 CASCADE;
@ -21,12 +21,6 @@ BEGIN;
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
SET search_path TO public; SET search_path TO public;
CREATE EXTENSION citus; CREATE EXTENSION citus;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
create table l1 (a int unique); create table l1 (a int unique);
SELECT create_reference_table('l1'); SELECT create_reference_table('l1');
create_reference_table create_reference_table
@ -136,6 +130,12 @@ ROLLBACK TO SAVEPOINT s3;
ROLLBACK; ROLLBACK;
CREATE EXTENSION citus; CREATE EXTENSION citus;
-- re-add the nodes to the cluster -- re-add the nodes to the cluster
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -331,16 +331,16 @@ BEGIN;
SET LOCAL citus.enable_repartition_joins TO true; SET LOCAL citus.enable_repartition_joins TO true;
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b; EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
Aggregate (actual rows=1 loops=1) Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=4 loops=1) -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 4 Task Count: 6
Tuple data received from nodes: 32 bytes Tuple data received from nodes: 48 bytes
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-- Confirm repartiton join in distributed subplan works -- Confirm repartiton join in distributed subplan works
EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b) WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
@ -350,16 +350,16 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Intermediate Data Size: 14 bytes Intermediate Data Size: 14 bytes
Result destination: Write locally Result destination: Write locally
-> Aggregate (actual rows=1 loops=1) -> Aggregate (actual rows=1 loops=1)
-> Custom Scan (Citus Adaptive) (actual rows=4 loops=1) -> Custom Scan (Citus Adaptive) (actual rows=6 loops=1)
Task Count: 4 Task Count: 6
Tuple data received from nodes: 32 bytes Tuple data received from nodes: 48 bytes
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
Task Count: 1 Task Count: 1
Tuple data received from nodes: 8 bytes Tuple data received from nodes: 8 bytes
Tasks Shown: All Tasks Shown: All
@ -1108,20 +1108,20 @@ EXPLAIN (COSTS FALSE)
AND l_suppkey = s_suppkey; AND l_suppkey = s_suppkey;
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 4 Map Task Count: 6
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 1 Map Task Count: 1
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 1 Map Task Count: 1
Merge Task Count: 4 Merge Task Count: 6
EXPLAIN (COSTS FALSE, FORMAT JSON) EXPLAIN (COSTS FALSE, FORMAT JSON)
SELECT count(*) SELECT count(*)
FROM lineitem, orders, customer_append, supplier_single_shard FROM lineitem, orders, customer_append, supplier_single_shard
@ -1142,26 +1142,26 @@ EXPLAIN (COSTS FALSE, FORMAT JSON)
"Parallel Aware": false, "Parallel Aware": false,
"Distributed Query": { "Distributed Query": {
"Job": { "Job": {
"Task Count": 4, "Task Count": 6,
"Tasks Shown": "None, not supported for re-partition queries", "Tasks Shown": "None, not supported for re-partition queries",
"Dependent Jobs": [ "Dependent Jobs": [
{ {
"Map Task Count": 4, "Map Task Count": 6,
"Merge Task Count": 4, "Merge Task Count": 6,
"Dependent Jobs": [ "Dependent Jobs": [
{ {
"Map Task Count": 2, "Map Task Count": 2,
"Merge Task Count": 4 "Merge Task Count": 6
}, },
{ {
"Map Task Count": 1, "Map Task Count": 1,
"Merge Task Count": 4 "Merge Task Count": 6
} }
] ]
}, },
{ {
"Map Task Count": 1, "Map Task Count": 1,
"Merge Task Count": 4 "Merge Task Count": 6
} }
] ]
} }
@ -1198,26 +1198,26 @@ EXPLAIN (COSTS FALSE, FORMAT XML)
<Parallel-Aware>false</Parallel-Aware> <Parallel-Aware>false</Parallel-Aware>
<Distributed-Query> <Distributed-Query>
<Job> <Job>
<Task-Count>4</Task-Count> <Task-Count>6</Task-Count>
<Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown> <Tasks-Shown>None, not supported for re-partition queries</Tasks-Shown>
<Dependent-Jobs> <Dependent-Jobs>
<MapMergeJob> <MapMergeJob>
<Map-Task-Count>4</Map-Task-Count> <Map-Task-Count>6</Map-Task-Count>
<Merge-Task-Count>4</Merge-Task-Count> <Merge-Task-Count>6</Merge-Task-Count>
<Dependent-Jobs> <Dependent-Jobs>
<MapMergeJob> <MapMergeJob>
<Map-Task-Count>2</Map-Task-Count> <Map-Task-Count>2</Map-Task-Count>
<Merge-Task-Count>4</Merge-Task-Count> <Merge-Task-Count>6</Merge-Task-Count>
</MapMergeJob> </MapMergeJob>
<MapMergeJob> <MapMergeJob>
<Map-Task-Count>1</Map-Task-Count> <Map-Task-Count>1</Map-Task-Count>
<Merge-Task-Count>4</Merge-Task-Count> <Merge-Task-Count>6</Merge-Task-Count>
</MapMergeJob> </MapMergeJob>
</Dependent-Jobs> </Dependent-Jobs>
</MapMergeJob> </MapMergeJob>
<MapMergeJob> <MapMergeJob>
<Map-Task-Count>1</Map-Task-Count> <Map-Task-Count>1</Map-Task-Count>
<Merge-Task-Count>4</Merge-Task-Count> <Merge-Task-Count>6</Merge-Task-Count>
</MapMergeJob> </MapMergeJob>
</Dependent-Jobs> </Dependent-Jobs>
</Job> </Job>
@ -1264,13 +1264,13 @@ EXPLAIN (COSTS FALSE, FORMAT YAML)
Parallel Aware: false Parallel Aware: false
Distributed Query: Distributed Query:
Job: Job:
Task Count: 4 Task Count: 6
Tasks Shown: "None, not supported for re-partition queries" Tasks Shown: "None, not supported for re-partition queries"
Dependent Jobs: Dependent Jobs:
- Map Task Count: 2 - Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
- Map Task Count: 1 - Map Task Count: 1
Merge Task Count: 4 Merge Task Count: 6
-- ensure local plans display correctly -- ensure local plans display correctly
CREATE TABLE lineitem_clone (LIKE lineitem); CREATE TABLE lineitem_clone (LIKE lineitem);
EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone;
@ -2317,7 +2317,7 @@ SELECT count(distinct a) from r NATURAL JOIN ref_table;
Custom Scan (Citus Adaptive) (actual rows=1 loops=1) Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
-> Distributed Subplan XXX_1 -> Distributed Subplan XXX_1
Intermediate Data Size: 220 bytes Intermediate Data Size: 220 bytes
Result destination: Send to 2 nodes Result destination: Send to 3 nodes
-> Custom Scan (Citus Adaptive) (actual rows=10 loops=1) -> Custom Scan (Citus Adaptive) (actual rows=10 loops=1)
Task Count: 4 Task Count: 4
Tuple data received from nodes: 120 bytes Tuple data received from nodes: 120 bytes
@ -3146,8 +3146,6 @@ Custom Scan (Citus Adaptive) (actual rows=0 loops=1)
-- check when auto explain + analyze is enabled, we do not allow local execution. -- check when auto explain + analyze is enabled, we do not allow local execution.
CREATE SCHEMA test_auto_explain; CREATE SCHEMA test_auto_explain;
SET search_path TO 'test_auto_explain'; SET search_path TO 'test_auto_explain';
SELECT citus_set_coordinator_host('localhost');
CREATE TABLE test_ref_table (key int PRIMARY KEY); CREATE TABLE test_ref_table (key int PRIMARY KEY);
SELECT create_reference_table('test_ref_table'); SELECT create_reference_table('test_ref_table');
@ -3157,9 +3155,5 @@ set auto_explain.log_analyze to true;
-- the following should not be locally executed since explain analyze is on -- the following should not be locally executed since explain analyze is on
select * from test_ref_table; select * from test_ref_table;
DROP SCHEMA test_auto_explain CASCADE; DROP SCHEMA test_auto_explain CASCADE;
select master_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
DROP SCHEMA multi_explain CASCADE; DROP SCHEMA multi_explain CASCADE;
@ -495,14 +495,6 @@ SET search_path TO fix_idx_names, public;
DROP TABLE dist_partitioned_table; DROP TABLE dist_partitioned_table;
SET citus.next_shard_id TO 910040; SET citus.next_shard_id TO 910040;
-- test with citus local table -- test with citus local table
SET client_min_messages TO WARNING;
SELECT 1 FROM citus_add_node('localhost', :master_port, groupid=>0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
CREATE TABLE date_partitioned_citus_local_table( CREATE TABLE date_partitioned_citus_local_table(
measureid integer, measureid integer,
eventdate date, eventdate date,
@ -750,9 +742,3 @@ ALTER TABLE parent_table DROP CONSTRAINT pkey_cst CASCADE;
ALTER TABLE parent_table DROP CONSTRAINT unique_cst CASCADE; ALTER TABLE parent_table DROP CONSTRAINT unique_cst CASCADE;
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
DROP SCHEMA fix_idx_names CASCADE; DROP SCHEMA fix_idx_names CASCADE;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -856,11 +856,16 @@ SELECT create_reference_table('reference_table_second');
CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int); CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int);
DROP TABLE reference_table CASCADE; DROP TABLE reference_table CASCADE;
NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey on table reference_table_second NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey on table reference_table_second
NOTICE: drop cascades to constraint reference_table_second_referencing_column_fkey_1350654 on table public.reference_table_second_1350654
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_local_table(id)); CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_local_table(id));
SELECT create_reference_table('reference_table'); SELECT create_reference_table('reference_table');
ERROR: referenced table "referenced_local_table" must be a distributed table or a reference table create_reference_table
DETAIL: To enforce foreign keys, the referencing and referenced rows need to be stored on the same node. ---------------------------------------------------------------------
HINT: You could use SELECT create_reference_table('referenced_local_table') to replicate the referenced table to all nodes or consider dropping the foreign key
(1 row)
-- test foreign key creation on CREATE TABLE on self referencing reference table -- test foreign key creation on CREATE TABLE on self referencing reference table
CREATE TABLE self_referencing_reference_table( CREATE TABLE self_referencing_reference_table(
id int, id int,
@ -877,6 +882,7 @@ SELECT create_reference_table('self_referencing_reference_table');
-- test foreign key creation on ALTER TABLE from reference table -- test foreign key creation on ALTER TABLE from reference table
DROP TABLE reference_table; DROP TABLE reference_table;
NOTICE: removing table public.referenced_local_table from metadata as it is not connected to any reference tables via foreign keys
CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int);
SELECT create_reference_table('reference_table'); SELECT create_reference_table('reference_table');
create_reference_table create_reference_table
@ -911,6 +917,9 @@ DROP TABLE reference_table CASCADE;
NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to constraint fk on table references_to_reference_table DETAIL: drop cascades to constraint fk on table references_to_reference_table
drop cascades to constraint fk on table reference_table_second drop cascades to constraint fk on table reference_table_second
NOTICE: drop cascades to constraint fk_1350663 on table public.reference_table_second_1350663
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int);
SELECT create_reference_table('reference_table'); SELECT create_reference_table('reference_table');
create_reference_table create_reference_table
@ -919,9 +928,6 @@ SELECT create_reference_table('reference_table');
(1 row) (1 row)
ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_local_table(id); ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_local_table(id);
ERROR: referenced table "referenced_local_table" must be a distributed table or a reference table
DETAIL: To enforce foreign keys, the referencing and referenced rows need to be stored on the same node.
HINT: You could use SELECT create_reference_table('referenced_local_table') to replicate the referenced table to all nodes or consider dropping the foreign key
-- test foreign key creation on ALTER TABLE on self referencing reference table -- test foreign key creation on ALTER TABLE on self referencing reference table
DROP TABLE self_referencing_reference_table; DROP TABLE self_referencing_reference_table;
CREATE TABLE self_referencing_reference_table( CREATE TABLE self_referencing_reference_table(
@ -1187,12 +1193,7 @@ CREATE TABLE set_on_default_test_referencing(
REFERENCES set_on_default_test_referenced(col_1, col_3) REFERENCES set_on_default_test_referenced(col_1, col_3)
ON UPDATE SET DEFAULT ON UPDATE SET DEFAULT
); );
-- from distributed / reference to reference, fkey exists before calling the UDFs
SELECT create_distributed_table('set_on_default_test_referencing', 'col_1');
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
SELECT create_reference_table('set_on_default_test_referencing');
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
DROP TABLE set_on_default_test_referencing;
CREATE TABLE set_on_default_test_referencing( CREATE TABLE set_on_default_test_referencing(
col_1 serial, col_2 int, col_3 int, col_4 int col_1 serial, col_2 int, col_3 int, col_4 int
); );
@ -1276,3 +1277,6 @@ ERROR: cannot create foreign key constraint since Citus does not support ON DEL
-- we no longer need those tables -- we no longer need those tables
DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2, DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table, dropfkeytest2,
set_on_default_test_referenced, set_on_default_test_referencing; set_on_default_test_referenced, set_on_default_test_referencing;
NOTICE: drop cascades to constraint fk_1350664 on table public.reference_table_1350664
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
@ -884,7 +884,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator -- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator
@ -921,7 +921,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- make things a bit more complicate with IN clauses -- make things a bit more complicate with IN clauses
@ -940,7 +940,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- implicit join on non partition column should also not be pushed down, -- implicit join on non partition column should also not be pushed down,
@ -959,7 +959,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
RESET client_min_messages; RESET client_min_messages;
@ -981,7 +981,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- foo is not joined on the partition key so the query is not -- foo is not joined on the partition key so the query is not
@ -1046,7 +1046,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- foo is not joined on the partition key so the query is not -- foo is not joined on the partition key so the query is not
@ -1437,7 +1437,7 @@ $Q$);
Group Key: remote_scan.id Group Key: remote_scan.id
Filter: (pg_catalog.sum(remote_scan.worker_column_4) > '10'::numeric) Filter: (pg_catalog.sum(remote_scan.worker_column_4) > '10'::numeric)
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(11 rows) (11 rows)
-- cannot push down since foo doesn't have en equi join -- cannot push down since foo doesn't have en equi join
@ -1514,7 +1514,7 @@ $Q$);
-> HashAggregate -> HashAggregate
Group Key: remote_scan.user_id Group Key: remote_scan.user_id
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(6 rows) (6 rows)
-- join among reference_ids and averages is not on the partition key -- join among reference_ids and averages is not on the partition key
@ -1576,7 +1576,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- Selected value in the WHERE is not partition key, so we cannot use distributed -- Selected value in the WHERE is not partition key, so we cannot use distributed
@ -3276,7 +3276,7 @@ $$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- For INSERT SELECT, when a lateral query references an outer query, push-down is possible even if limit clause exists in the lateral query. -- For INSERT SELECT, when a lateral query references an outer query, push-down is possible even if limit clause exists in the lateral query.
@ -884,7 +884,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator -- EXPLAIN ANALYZE is not supported for INSERT ... SELECT via coordinator
@ -921,7 +921,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- make things a bit more complicate with IN clauses -- make things a bit more complicate with IN clauses
@ -940,7 +940,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- implicit join on non partition column should also not be pushed down, -- implicit join on non partition column should also not be pushed down,
@ -959,7 +959,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
RESET client_min_messages; RESET client_min_messages;
@ -981,7 +981,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- foo is not joined on the partition key so the query is not -- foo is not joined on the partition key so the query is not
@ -1046,7 +1046,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- foo is not joined on the partition key so the query is not -- foo is not joined on the partition key so the query is not
@ -1437,7 +1437,7 @@ $Q$);
Group Key: remote_scan.id Group Key: remote_scan.id
Filter: (pg_catalog.sum(remote_scan.worker_column_4) > '10'::numeric) Filter: (pg_catalog.sum(remote_scan.worker_column_4) > '10'::numeric)
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(11 rows) (11 rows)
-- cannot push down since foo doesn't have en equi join -- cannot push down since foo doesn't have en equi join
@ -1514,7 +1514,7 @@ $Q$);
-> HashAggregate -> HashAggregate
Group Key: remote_scan.user_id Group Key: remote_scan.user_id
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(6 rows) (6 rows)
-- join among reference_ids and averages is not on the partition key -- join among reference_ids and averages is not on the partition key
@ -1576,7 +1576,7 @@ $Q$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- Selected value in the WHERE is not partition key, so we cannot use distributed -- Selected value in the WHERE is not partition key, so we cannot use distributed
@ -3276,7 +3276,7 @@ $$);
Custom Scan (Citus INSERT ... SELECT) Custom Scan (Citus INSERT ... SELECT)
INSERT/SELECT method: pull to coordinator INSERT/SELECT method: pull to coordinator
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
(4 rows) (4 rows)
-- For INSERT SELECT, when a lateral query references an outer query, push-down is possible even if limit clause exists in the lateral query. -- For INSERT SELECT, when a lateral query references an outer query, push-down is possible even if limit clause exists in the lateral query.
@ -589,8 +589,9 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
RESET client_min_messages; RESET client_min_messages;
DROP SCHEMA on_conflict CASCADE; DROP SCHEMA on_conflict CASCADE;
NOTICE: drop cascades to 7 other objects NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to table test_ref_table DETAIL: drop cascades to table test_ref_table
drop cascades to table test_ref_table_1900012
drop cascades to table source_table_3 drop cascades to table source_table_3
drop cascades to table source_table_4 drop cascades to table source_table_4
drop cascades to table target_table_2 drop cascades to table target_table_2
@ -589,8 +589,9 @@ DEBUG: Collecting INSERT ... SELECT results on coordinator
RESET client_min_messages; RESET client_min_messages;
DROP SCHEMA on_conflict CASCADE; DROP SCHEMA on_conflict CASCADE;
NOTICE: drop cascades to 7 other objects NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to table test_ref_table DETAIL: drop cascades to table test_ref_table
drop cascades to table test_ref_table_1900012
drop cascades to table source_table_3 drop cascades to table source_table_3
drop cascades to table source_table_4 drop cascades to table source_table_4
drop cascades to table target_table_2 drop cascades to table target_table_2
@ -140,3 +140,7 @@ DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U
explain statements for distributed queries are not enabled explain statements for distributed queries are not enabled
(3 rows) (3 rows)
SET client_min_messages TO WARNING;
DROP TABLE varchar_partitioned_table;
DROP TABLE array_partitioned_table;
DROP TABLE composite_partitioned_table;
@ -298,10 +298,5 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(table_5.i
DEBUG: generating subplan XXX_1 for subquery SELECT table_6.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6 JOIN multi_recursive.dist0 table_8 USING (id)) WHERE (table_8.id OPERATOR(pg_catalog.<) 0) ORDER BY table_6.id DEBUG: generating subplan XXX_1 for subquery SELECT table_6.id FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_6 JOIN multi_recursive.dist0 table_8 USING (id)) WHERE (table_8.id OPERATOR(pg_catalog.<) 0) ORDER BY table_6.id
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(table_5.id) AS avg FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5 JOIN multi_recursive.dist0 table_9 USING (id)) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT avg(table_5.id) AS avg FROM ((SELECT intermediate_result.id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer)) table_5 JOIN multi_recursive.dist0 table_9 USING (id))
ERROR: recursive complex joins are only supported when all distributed tables are co-located and joined on their distribution columns ERROR: recursive complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
RESET client_min_messages; SET client_min_messages TO WARNING;
DROP SCHEMA multi_recursive CASCADE; DROP SCHEMA multi_recursive CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table tbl_dist1
drop cascades to table tbl_ref1
drop cascades to table dist0
drop cascades to table dist1
@ -85,7 +85,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner; GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner; GRANT USAGE ON SCHEMA public TO pg_database_owner;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -98,9 +98,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -158,7 +158,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner; GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner; GRANT USAGE ON SCHEMA public TO pg_database_owner;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -180,9 +180,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 2, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 2, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -226,7 +226,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner; GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner; GRANT USAGE ON SCHEMA public TO pg_database_owner;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -245,9 +245,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -287,7 +287,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner; GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner; GRANT USAGE ON SCHEMA public TO pg_database_owner;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -306,9 +306,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -355,7 +355,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner; GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner; GRANT USAGE ON SCHEMA public TO pg_database_owner;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -374,9 +374,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -416,7 +416,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner; GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner; GRANT USAGE ON SCHEMA public TO pg_database_owner;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -435,9 +435,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -451,11 +451,11 @@ SELECT unnest(activate_node_snapshot()) order by 1;
(54 rows) (54 rows)
-- Test start_metadata_sync_to_node and citus_activate_node UDFs -- Test start_metadata_sync_to_node and citus_activate_node UDFs
-- Ensure that hasmetadata=false for all nodes -- Ensure that hasmetadata=false for all nodes except for the coordinator node
SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
0 1
(1 row) (1 row)
-- Show that metadata can not be synced on secondary node -- Show that metadata can not be synced on secondary node
@ -463,7 +463,7 @@ SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_po
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary');
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
4 5
(1 row) (1 row)
SELECT start_metadata_sync_to_node('localhost', 8888); SELECT start_metadata_sync_to_node('localhost', 8888);
@ -495,7 +495,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
master_add_secondary_node master_add_secondary_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
5 6
(1 row) (1 row)
\c - - - :master_port \c - - - :master_port
@ -509,7 +509,7 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
nodeid | hasmetadata nodeid | hasmetadata
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | t 2 | t
(1 row) (1 row)
-- Check that the metadata has been copied to the worker -- Check that the metadata has been copied to the worker
@ -523,11 +523,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
(4 rows) 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@ -661,11 +662,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
(4 rows) 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@ -1509,7 +1511,7 @@ SELECT create_distributed_table('mx_table', 'a');
SELECT master_add_node('localhost', :worker_2_port); SELECT master_add_node('localhost', :worker_2_port);
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
6 7
(1 row) (1 row)
\c - mx_user - :worker_1_port \c - mx_user - :worker_1_port
@ -1620,9 +1622,10 @@ ORDER BY
nodeport; nodeport;
logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
mx_ref | n | t | 1310074 | 100074 | localhost | 57637 mx_ref | n | t | 1310074 | 100074 | localhost | 57636
mx_ref | n | t | 1310074 | 100075 | localhost | 57638 mx_ref | n | t | 1310074 | 100075 | localhost | 57637
(2 rows) mx_ref | n | t | 1310074 | 100076 | localhost | 57638
(3 rows)
SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset
-- make sure we have the pg_dist_colocation record on the worker -- make sure we have the pg_dist_colocation record on the worker
@ -1716,8 +1719,9 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass; WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT shardid, nodename, nodeport SELECT shardid, nodename, nodeport
@ -1725,15 +1729,16 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass; WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
\c - - - :master_port \c - - - :master_port
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
SELECT master_add_node('localhost', :worker_2_port); SELECT master_add_node('localhost', :worker_2_port);
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
7 8
(1 row) (1 row)
RESET client_min_messages; RESET client_min_messages;
@ -1743,8 +1748,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport; ORDER BY shardid, nodeport;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT shardid, nodename, nodeport SELECT shardid, nodename, nodeport
@ -1753,8 +1759,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport; ORDER BY shardid, nodeport;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
-- Get the metadata back into a consistent state -- Get the metadata back into a consistent state
\c - - - :master_port \c - - - :master_port
@ -1862,10 +1869,6 @@ HINT: If the node is up, wait until metadata gets synced to it and try again.
ALTER TABLE dist_table_1 ADD COLUMN b int; ALTER TABLE dist_table_1 ADD COLUMN b int;
ERROR: localhost:xxxxx is a metadata node, but is out of sync ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again. HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT master_add_node('localhost', :master_port, groupid => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT citus_disable_node_and_wait('localhost', :worker_1_port); SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
ERROR: disabling the first worker node in the metadata is not allowed ERROR: disabling the first worker node in the metadata is not allowed
DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations. DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations.
@ -1918,7 +1921,7 @@ SELECT wait_until_metadata_sync(60000);
SELECT master_add_node('localhost', :worker_2_port); SELECT master_add_node('localhost', :worker_2_port);
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
7 8
(1 row) (1 row)
CREATE SEQUENCE mx_test_sequence_0; CREATE SEQUENCE mx_test_sequence_0;
@ -1989,7 +1992,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO pg_database_owner; GRANT CREATE ON SCHEMA public TO pg_database_owner;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO pg_database_owner; GRANT USAGE ON SCHEMA public TO pg_database_owner;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (4, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(5, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(7, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (5, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(6, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(8, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -2025,9 +2028,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -2050,9 +2053,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 1, 100076), (1310075, 0, 5, 100077)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100078), (1310077, 0, 5, 100079), (1310078, 0, 1, 100080), (1310079, 0, 5, 100081)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100088), (1310086, 0, 5, 100089), (1310087, 0, 1, 100090), (1310088, 0, 5, 100091)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
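The expected-output changes above all trace back to the coordinator being present in pg_dist_node (nodeid 1, groupid 0, port 57636): activate_node_snapshot() now lists three primary nodes instead of two, the worker nodeids shift up by one, and the mx_ref reference table gains a placement on the coordinator. The following is an illustrative sketch only (these exact call sites are assumptions, not part of this diff); it shows the UDFs a regression test typically uses to put the coordinator into metadata and take it back out:

-- register the coordinator in pg_dist_node under group 0 (illustrative)
SELECT citus_set_coordinator_host('localhost', :master_port);
-- the same effect can be had by adding the coordinator as a node in group 0
SELECT master_add_node('localhost', :master_port, groupid => 0);
-- remove it again when a test expects a coordinator-free cluster
SELECT citus_remove_node('localhost', :master_port);

With the coordinator registered, reference-table shards are also placed on group 0, which is why the mx_ref placement listings above include port 57636 alongside the worker ports.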


@ -85,7 +85,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres; GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres; GRANT USAGE ON SCHEMA public TO postgres;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -98,9 +98,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -158,7 +158,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres; GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres; GRANT USAGE ON SCHEMA public TO postgres;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -180,9 +180,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 2, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 2, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -226,7 +226,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres; GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres; GRANT USAGE ON SCHEMA public TO postgres;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -245,9 +245,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -287,7 +287,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres; GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres; GRANT USAGE ON SCHEMA public TO postgres;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -306,9 +306,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -355,7 +355,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres; GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres; GRANT USAGE ON SCHEMA public TO postgres;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -374,9 +374,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -416,7 +416,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres; GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres; GRANT USAGE ON SCHEMA public TO postgres;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(3, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -435,9 +435,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -451,11 +451,11 @@ SELECT unnest(activate_node_snapshot()) order by 1;
(54 rows) (54 rows)
-- Test start_metadata_sync_to_node and citus_activate_node UDFs -- Test start_metadata_sync_to_node and citus_activate_node UDFs
-- Ensure that hasmetadata=false for all nodes -- Ensure that hasmetadata=false for all nodes except for the coordinator node
SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true;
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
0 1
(1 row) (1 row)
-- Show that metadata can not be synced on secondary node -- Show that metadata can not be synced on secondary node
@ -463,7 +463,7 @@ SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_po
SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary');
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
4 5
(1 row) (1 row)
SELECT start_metadata_sync_to_node('localhost', 8888); SELECT start_metadata_sync_to_node('localhost', 8888);
@ -495,7 +495,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888;
SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster');
master_add_secondary_node master_add_secondary_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
5 6
(1 row) (1 row)
\c - - - :master_port \c - - - :master_port
@ -509,7 +509,7 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port;
nodeid | hasmetadata nodeid | hasmetadata
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | t 2 | t
(1 row) (1 row)
-- Check that the metadata has been copied to the worker -- Check that the metadata has been copied to the worker
@ -523,11 +523,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
(4 rows) 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@ -661,11 +662,12 @@ SELECT * FROM pg_dist_local_group;
SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_node ORDER BY nodeid;
nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster | metadatasynced | shouldhaveshards
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 1 | localhost | 57637 | default | t | t | primary | default | t | t 1 | 0 | localhost | 57636 | default | t | t | primary | default | t | f
2 | 2 | localhost | 57638 | default | f | t | primary | default | f | t 2 | 1 | localhost | 57637 | default | t | t | primary | default | t | t
4 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t 3 | 2 | localhost | 57638 | default | f | t | primary | default | f | t
5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t 5 | 1 | localhost | 8888 | default | f | t | secondary | default | f | t
(4 rows) 6 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster | f | t
(5 rows)
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text; SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
@ -1509,7 +1511,7 @@ SELECT create_distributed_table('mx_table', 'a');
SELECT master_add_node('localhost', :worker_2_port); SELECT master_add_node('localhost', :worker_2_port);
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
6 7
(1 row) (1 row)
\c - mx_user - :worker_1_port \c - mx_user - :worker_1_port
@ -1620,9 +1622,10 @@ ORDER BY
nodeport; nodeport;
logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
mx_ref | n | t | 1310074 | 100074 | localhost | 57637 mx_ref | n | t | 1310074 | 100074 | localhost | 57636
mx_ref | n | t | 1310074 | 100075 | localhost | 57638 mx_ref | n | t | 1310074 | 100075 | localhost | 57637
(2 rows) mx_ref | n | t | 1310074 | 100076 | localhost | 57638
(3 rows)
SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset
-- make sure we have the pg_dist_colocation record on the worker -- make sure we have the pg_dist_colocation record on the worker
@ -1716,8 +1719,9 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass; WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT shardid, nodename, nodeport SELECT shardid, nodename, nodeport
@ -1725,15 +1729,16 @@ FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement
WHERE logicalrelid='mx_ref'::regclass; WHERE logicalrelid='mx_ref'::regclass;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
\c - - - :master_port \c - - - :master_port
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
SELECT master_add_node('localhost', :worker_2_port); SELECT master_add_node('localhost', :worker_2_port);
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
7 8
(1 row) (1 row)
RESET client_min_messages; RESET client_min_messages;
@ -1743,8 +1748,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport; ORDER BY shardid, nodeport;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT shardid, nodename, nodeport SELECT shardid, nodename, nodeport
@ -1753,8 +1759,9 @@ WHERE logicalrelid='mx_ref'::regclass
ORDER BY shardid, nodeport; ORDER BY shardid, nodeport;
shardid | nodename | nodeport shardid | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1310075 | localhost | 57636
1310075 | localhost | 57637 1310075 | localhost | 57637
(1 row) (2 rows)
-- Get the metadata back into a consistent state -- Get the metadata back into a consistent state
\c - - - :master_port \c - - - :master_port
@ -1862,10 +1869,6 @@ HINT: If the node is up, wait until metadata gets synced to it and try again.
ALTER TABLE dist_table_1 ADD COLUMN b int; ALTER TABLE dist_table_1 ADD COLUMN b int;
ERROR: localhost:xxxxx is a metadata node, but is out of sync ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again. HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT master_add_node('localhost', :master_port, groupid => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
ERROR: localhost:xxxxx is a metadata node, but is out of sync
HINT: If the node is up, wait until metadata gets synced to it and try again.
SELECT citus_disable_node_and_wait('localhost', :worker_1_port); SELECT citus_disable_node_and_wait('localhost', :worker_1_port);
ERROR: disabling the first worker node in the metadata is not allowed ERROR: disabling the first worker node in the metadata is not allowed
DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations. DETAIL: Citus uses the first worker node in the metadata for certain internal operations when replicated tables are modified. Synchronous mode ensures that all nodes have the same view of the first worker node, which is used for certain locking operations.
@ -1918,7 +1921,7 @@ SELECT wait_until_metadata_sync(60000);
SELECT master_add_node('localhost', :worker_2_port); SELECT master_add_node('localhost', :worker_2_port);
master_add_node master_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
7 8
(1 row) (1 row)
CREATE SEQUENCE mx_test_sequence_0; CREATE SEQUENCE mx_test_sequence_0;
@ -1989,7 +1992,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
GRANT CREATE ON SCHEMA public TO postgres; GRANT CREATE ON SCHEMA public TO postgres;
GRANT USAGE ON SCHEMA public TO PUBLIC; GRANT USAGE ON SCHEMA public TO PUBLIC;
GRANT USAGE ON SCHEMA public TO postgres; GRANT USAGE ON SCHEMA public TO postgres;
INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (4, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(5, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(7, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE) INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (5, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(6, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 0, 'localhost', 57636, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', FALSE),(2, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(8, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE)
RESET ROLE RESET ROLE
RESET ROLE RESET ROLE
SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
@ -2025,9 +2028,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
SET citus.enable_ddl_propagation TO 'on' SET citus.enable_ddl_propagation TO 'on'
UPDATE pg_dist_local_group SET groupid = 1 UPDATE pg_dist_local_group SET groupid = 1
UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 1 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
@ -2050,9 +2053,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 1, 100076), (1310075, 0, 5, 100077)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100078), (1310077, 0, 5, 100079), (1310078, 0, 1, 100080), (1310079, 0, 5, 100081)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100088), (1310086, 0, 5, 100089), (1310087, 0, 1, 100090), (1310088, 0, 5, 100091)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
@ -914,7 +914,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate; ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count logicalrelid | shardstate | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
reference_modifying_xacts | 1 | 2 reference_modifying_xacts | 1 | 3
(1 row) (1 row)
-- for the time-being drop the constraint -- for the time-being drop the constraint
@ -1021,7 +1021,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate; ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count logicalrelid | shardstate | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
reference_modifying_xacts | 1 | 2 reference_modifying_xacts | 1 | 3
hash_modifying_xacts | 1 | 4 hash_modifying_xacts | 1 | 4
(2 rows) (2 rows)
@ -1070,7 +1070,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate; ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count logicalrelid | shardstate | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
reference_modifying_xacts | 1 | 2 reference_modifying_xacts | 1 | 3
hash_modifying_xacts | 1 | 4 hash_modifying_xacts | 1 | 4
(2 rows) (2 rows)
@ -1235,7 +1235,7 @@ GROUP BY s.logicalrelid, sp.shardstate
ORDER BY s.logicalrelid, sp.shardstate; ORDER BY s.logicalrelid, sp.shardstate;
logicalrelid | shardstate | count logicalrelid | shardstate | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
reference_failure_test | 1 | 2 reference_failure_test | 1 | 3
(1 row) (1 row)
-- any failure rollbacks the transaction -- any failure rollbacks the transaction
@ -15,7 +15,7 @@
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id worker_1_id
--------------------------------------------------------------------- ---------------------------------------------------------------------
16 17
(1 row) (1 row)
\gset \gset
@ -355,9 +355,10 @@ SELECT * FROM run_command_on_placements('multiuser_schema.reference_table', $$ s
ORDER BY nodename, nodeport, shardid; ORDER BY nodename, nodeport, shardid;
nodename | nodeport | shardid | success | result nodename | nodeport | shardid | success | result
--------------------------------------------------------------------- ---------------------------------------------------------------------
localhost | 57636 | 109094 | t | t
localhost | 57637 | 109094 | t | t localhost | 57637 | 109094 | t | t
localhost | 57638 | 109094 | t | t localhost | 57638 | 109094 | t | t
(2 rows) (3 rows)
-- create another table in the schema, verify select is not granted -- create another table in the schema, verify select is not granted
CREATE TABLE multiuser_schema.another_table(a int, b int); CREATE TABLE multiuser_schema.another_table(a int, b int);
@ -483,9 +484,10 @@ ORDER BY nodename, nodeport, shardid;
(6 rows) (6 rows)
DROP SCHEMA multiuser_schema CASCADE; DROP SCHEMA multiuser_schema CASCADE;
NOTICE: drop cascades to 3 other objects NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table multiuser_schema.hash_table DETAIL: drop cascades to table multiuser_schema.hash_table
drop cascades to table multiuser_schema.reference_table drop cascades to table multiuser_schema.reference_table
drop cascades to table multiuser_schema.reference_table_109094
drop cascades to table multiuser_schema.another_table drop cascades to table multiuser_schema.another_table
DROP SCHEMA multiuser_second_schema CASCADE; DROP SCHEMA multiuser_second_schema CASCADE;
NOTICE: drop cascades to table multiuser_second_schema.hash_table NOTICE: drop cascades to table multiuser_second_schema.hash_table
@ -36,6 +36,7 @@ WHERE bar.id_deep = join_alias.id_deep;
(0 rows) (0 rows)
DROP SCHEMA multi_name_resolution CASCADE; DROP SCHEMA multi_name_resolution CASCADE;
NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table namenest1 DETAIL: drop cascades to table namenest1
drop cascades to table namenest2 drop cascades to table namenest2
drop cascades to table namenest2_2250000000010
@ -104,15 +104,33 @@ LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -129,18 +147,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
-- Next, set the maximum value for another shard to null. Then check that we -- Next, set the maximum value for another shard to null. Then check that we
@ -169,15 +195,33 @@ LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -194,18 +238,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
-- Last, set the minimum value to 0 and check that we don't treat it as null. We -- Last, set the minimum value to 0 and check that we don't treat it as null. We
@ -232,15 +284,33 @@ LOG: join order: [ "lineitem" ][ dual partition join "orders" ]
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -257,18 +327,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
RESET client_min_messages; RESET client_min_messages;
@ -1952,6 +1952,8 @@ DEBUG: switching to sequential query execution mode
DETAIL: Table "<dropped>" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode DETAIL: Table "<dropped>" is modified, which might lead to data inconsistencies or distributed deadlocks via parallel accesses to hash distributed tables due to foreign keys. Any parallel modification to those hash distributed tables in the same transaction can only be executed in sequential query execution mode
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)" CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
DEBUG: drop cascades to 2 other objects DEBUG: drop cascades to 2 other objects
DETAIL: drop cascades to constraint partitioning_reference_fkey_1660302 on table partitioning_schema.partitioning_test_1660302 DETAIL: drop cascades to constraint partitioning_reference_fkey_1660302 on table partitioning_schema.partitioning_test_1660302
drop cascades to constraint partitioning_reference_fkey_1660304 on table partitioning_schema.partitioning_test_1660304 drop cascades to constraint partitioning_reference_fkey_1660304 on table partitioning_schema.partitioning_test_1660304
@ -3772,13 +3774,6 @@ BEGIN;
ROLLBACK; ROLLBACK;
DROP TABLE pi_table; DROP TABLE pi_table;
-- 6) test with citus local table -- 6) test with citus local table
select 1 from citus_add_node('localhost', :master_port, groupid=>0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE date_partitioned_citus_local_table( CREATE TABLE date_partitioned_citus_local_table(
measureid integer, measureid integer,
eventdate date, eventdate date,
@ -4214,12 +4209,6 @@ DROP TABLE date_partitioned_table_to_exp;
DROP TABLE date_partitioned_citus_local_table CASCADE; DROP TABLE date_partitioned_citus_local_table CASCADE;
DROP TABLE date_partitioned_citus_local_table_2; DROP TABLE date_partitioned_citus_local_table_2;
set client_min_messages to notice; set client_min_messages to notice;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
-- d) invalid tables for helper UDFs -- d) invalid tables for helper UDFs
CREATE TABLE multiple_partition_column_table( CREATE TABLE multiple_partition_column_table(
event_id bigserial, event_id bigserial,

@ -9,7 +9,7 @@ SET citus.next_shard_id TO 20000000;
SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port;
worker_1_id worker_1_id
--------------------------------------------------------------------- ---------------------------------------------------------------------
16 17
(1 row) (1 row)
\gset \gset

@ -27,9 +27,10 @@ INSERT INTO source_table (a, b) VALUES (10, 10);
SELECT nodeid, groupid, nodename, nodeport, noderack, isactive, noderole, nodecluster FROM pg_dist_node ORDER BY 1, 2; SELECT nodeid, groupid, nodename, nodeport, noderack, isactive, noderole, nodecluster FROM pg_dist_node ORDER BY 1, 2;
nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster nodeid | groupid | nodename | nodeport | noderack | isactive | noderole | nodecluster
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 1 | localhost | 57637 | default | t | primary | default 1 | 0 | localhost | 57636 | default | t | primary | default
2 | 2 | localhost | 57638 | default | t | primary | default 2 | 1 | localhost | 57637 | default | t | primary | default
(2 rows) 3 | 2 | localhost | 57638 | default | t | primary | default
(3 rows)
UPDATE pg_dist_node SET noderole = 'secondary'; UPDATE pg_dist_node SET noderole = 'secondary';
\c "dbname=regression options='-c\ citus.use_secondary_nodes=always'" \c "dbname=regression options='-c\ citus.use_secondary_nodes=always'"

@ -668,8 +668,9 @@ SELECT id, pg_advisory_xact_lock(16) FROM test_table ORDER BY id;
END; END;
DROP SCHEMA multi_real_time_transaction CASCADE; DROP SCHEMA multi_real_time_transaction CASCADE;
NOTICE: drop cascades to 4 other objects NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table test_table DETAIL: drop cascades to table test_table
drop cascades to table co_test_table drop cascades to table co_test_table
drop cascades to table ref_test_table drop cascades to table ref_test_table
drop cascades to table ref_test_table_1610008
drop cascades to function insert_row_test(name) drop cascades to function insert_row_test(name)

@ -218,10 +218,24 @@ WHERE colocationid IN
1 | -1 | 0 1 | -1 | 0
(1 row) (1 row)
-- test that we cannot remove a node if it has the only placement for a shard
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
SELECT master_remove_node('localhost', :worker_1_port); SELECT master_remove_node('localhost', :worker_1_port);
ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx ERROR: cannot remove or disable the node localhost:xxxxx because because it contains the only shard placement for shard xxxxx
DETAIL: One of the table(s) that prevents the operation complete successfully is public.remove_node_reference_table DETAIL: One of the table(s) that prevents the operation complete successfully is public.remove_node_reference_table
HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables HINT: To proceed, either drop the tables or use undistribute_table() function to convert them to local tables
-- restore the coordinator
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port;
count count
@ -972,12 +986,6 @@ ORDER BY shardid ASC;
(0 rows) (0 rows)
\c - - - :master_port \c - - - :master_port
SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT citus_disable_node('localhost', :worker_2_port); SELECT citus_disable_node('localhost', :worker_2_port);
citus_disable_node citus_disable_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1004,12 +1012,6 @@ SELECT hasmetadata, metadatasynced FROM pg_dist_node WHERE nodeport = :master_po
t | t t | t
(1 row) (1 row)
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT SELECT
shardid, shardstate, shardlength, nodename, nodeport shardid, shardstate, shardlength, nodename, nodeport
FROM FROM

@ -7,6 +7,7 @@
SET citus.next_shard_id TO 690000; SET citus.next_shard_id TO 690000;
SET citus.enable_unique_job_ids TO off; SET citus.enable_unique_job_ids TO off;
SET citus.enable_repartition_joins to ON; SET citus.enable_repartition_joins to ON;
SET citus.shard_replication_factor to 1;
create schema repartition_join; create schema repartition_join;
DROP TABLE IF EXISTS repartition_join.order_line; DROP TABLE IF EXISTS repartition_join.order_line;
NOTICE: table "order_line" does not exist, skipping NOTICE: table "order_line" does not exist, skipping
@ -69,15 +70,33 @@ DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1]
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -94,34 +113,68 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 13 DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4 DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4 DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 18 DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 5 DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7 DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 23 DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 8 DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10 DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 28 DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
l_partkey | o_orderkey | count l_partkey | o_orderkey | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
18 | 12005 | 1 18 | 12005 | 1
@ -170,15 +223,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -195,6 +266,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
l_partkey | o_orderkey | count l_partkey | o_orderkey | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -214,15 +293,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -239,6 +336,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
o_orderkey | o_shippriority | count o_orderkey | o_shippriority | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -260,15 +365,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -285,6 +408,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
o_orderkey | o_shippriority | count o_orderkey | o_shippriority | count
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -304,15 +435,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -329,6 +478,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
o_orderkey | any_value o_orderkey | any_value
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -346,15 +503,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5 DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -371,6 +546,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20 DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20 DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 30
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 30
s_i_id s_i_id
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)

@ -17,15 +17,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -42,18 +60,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
SELECT SELECT
@ -66,15 +92,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -91,6 +135,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
2985 2985
@ -110,15 +162,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -135,18 +205,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
SELECT SELECT
@ -160,15 +238,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -185,6 +281,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
0 0
@ -204,15 +308,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -229,18 +351,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
SELECT SELECT
@ -254,15 +384,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -279,6 +427,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
0 0
@ -298,15 +454,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -323,18 +497,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
SELECT SELECT
@ -347,15 +529,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -372,6 +572,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
125 125
@ -391,15 +599,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -416,18 +642,26 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
(10 rows) (10 rows)
SELECT SELECT
@ -441,15 +675,33 @@ DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -466,6 +718,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
0 0
@ -30,15 +30,33 @@ DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -55,6 +73,16 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
@ -88,15 +116,33 @@ DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 4 DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -113,6 +159,16 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 24
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
@ -143,15 +199,33 @@ DEBUG: assigned task to node localhost:xxxxx
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3 DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -168,6 +242,16 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16 DETAIL: Creating dependency on merge taskId 16
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 24
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx
@ -155,14 +155,14 @@ LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_ot
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (Citus Adaptive) Custom Scan (Citus Adaptive)
Task Count: 4 Task Count: 6
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 3 Map Task Count: 3
Merge Task Count: 4 Merge Task Count: 6
-> MapMergeJob -> MapMergeJob
Map Task Count: 5 Map Task Count: 5
Merge Task Count: 4 Merge Task Count: 6
(9 rows) (9 rows)
SELECT * FROM repartition_udt JOIN repartition_udt_other SELECT * FROM repartition_udt JOIN repartition_udt_other
@ -284,7 +284,7 @@ DROP TABLE replicate_reference_table_rollback;
SELECT count(*) FROM pg_dist_node; SELECT count(*) FROM pg_dist_node;
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 2
(1 row) (1 row)
-- test whether we can create distributed objects on a single worker node -- test whether we can create distributed objects on a single worker node
@ -376,12 +376,6 @@ SELECT citus_add_node('localhost', :worker_2_port);
(1 row) (1 row)
-- required for create_distributed_table_concurrently -- required for create_distributed_table_concurrently
SELECT 1 FROM citus_set_coordinator_host('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
CREATE TABLE distributed_table_cdtc(column1 int primary key); CREATE TABLE distributed_table_cdtc(column1 int primary key);
SELECT create_distributed_table_concurrently('distributed_table_cdtc', 'column1'); SELECT create_distributed_table_concurrently('distributed_table_cdtc', 'column1');
@ -391,12 +385,6 @@ SELECT create_distributed_table_concurrently('distributed_table_cdtc', 'column1'
(1 row) (1 row)
RESET citus.shard_replication_factor; RESET citus.shard_replication_factor;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
SELECT SELECT
shardid, shardstate, shardlength, nodename, nodeport shardid, shardstate, shardlength, nodename, nodeport
FROM FROM
@ -712,12 +700,22 @@ SELECT master_remove_node('localhost', :worker_2_port);
CREATE TABLE ref_table_1(id int primary key, v int); CREATE TABLE ref_table_1(id int primary key, v int);
CREATE TABLE ref_table_2(id int primary key, v int references ref_table_1(id)); CREATE TABLE ref_table_2(id int primary key, v int references ref_table_1(id));
CREATE TABLE ref_table_3(id int primary key, v int references ref_table_2(id)); CREATE TABLE ref_table_3(id int primary key, v int references ref_table_2(id));
SELECT create_reference_table('ref_table_1'), SELECT create_reference_table('ref_table_1');
create_reference_table('ref_table_2'), create_reference_table
create_reference_table('ref_table_3');
create_reference_table | create_reference_table | create_reference_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
| |
(1 row)
SELECT create_reference_table('ref_table_2');
create_reference_table
---------------------------------------------------------------------
(1 row)
SELECT create_reference_table('ref_table_3');
create_reference_table
---------------------------------------------------------------------
(1 row) (1 row)
-- status before master_add_node -- status before master_add_node
@ -795,7 +793,7 @@ WHERE
ORDER BY 1,4,5; ORDER BY 1,4,5;
shardid | shardstate | shardlength | nodename | nodeport shardid | shardstate | shardlength | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1370019 | 1 | 0 | localhost | 57637 1370021 | 1 | 0 | localhost | 57637
(1 row) (1 row)
-- we should see the two shard placements after activation -- we should see the two shard placements after activation
@ -820,7 +818,7 @@ WHERE
ORDER BY 1,4,5; ORDER BY 1,4,5;
shardid | shardstate | shardlength | nodename | nodeport shardid | shardstate | shardlength | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1370019 | 1 | 0 | localhost | 57637 1370021 | 1 | 0 | localhost | 57637
(1 row) (1 row)
SELECT 1 FROM master_remove_node('localhost', :worker_2_port); SELECT 1 FROM master_remove_node('localhost', :worker_2_port);
@ -850,7 +848,7 @@ HINT: Add the target node via SELECT citus_add_node('localhost', 57638);
SELECT citus_add_secondary_node('localhost', :worker_2_port, 'localhost', :worker_1_port); SELECT citus_add_secondary_node('localhost', :worker_2_port, 'localhost', :worker_1_port);
citus_add_secondary_node citus_add_secondary_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
1370014 1370013
(1 row) (1 row)
SELECT citus_copy_shard_placement( SELECT citus_copy_shard_placement(
@ -1139,8 +1137,10 @@ select 1 FROM master_add_node('localhost', :worker_2_port);
BEGIN; BEGIN;
DROP TABLE test; DROP TABLE test;
CREATE TABLE test (x int, y int references ref(a)); CREATE TABLE test (x int, y int references ref(a));
SELECT create_distributed_table('test','x');
ERROR: canceling the transaction since it was involved in a distributed deadlock ERROR: canceling the transaction since it was involved in a distributed deadlock
DETAIL: When adding a foreign key from a local table to a reference table, Citus applies a conversion to all the local tables in the foreign key graph
SELECT create_distributed_table('test','x');
ERROR: current transaction is aborted, commands ignored until end of transaction block
END; END;
-- verify the split fails if we still need to replicate reference tables -- verify the split fails if we still need to replicate reference tables
SELECT citus_remove_node('localhost', :worker_2_port); SELECT citus_remove_node('localhost', :worker_2_port);
@ -1158,7 +1158,7 @@ SELECT create_distributed_table('test','x');
SELECT citus_add_node('localhost', :worker_2_port); SELECT citus_add_node('localhost', :worker_2_port);
citus_add_node citus_add_node
--------------------------------------------------------------------- ---------------------------------------------------------------------
1370022 1370020
(1 row) (1 row)
SELECT SELECT
@ -1194,7 +1194,7 @@ errors_received := 0;
RAISE '(%/1) failed to add node', errors_received; RAISE '(%/1) failed to add node', errors_received;
END; END;
$$; $$;
ERROR: (1/1) failed to add node ERROR: (0/1) failed to add node
-- drop unnecessary tables -- drop unnecessary tables
DROP TABLE initially_not_replicated_reference_table; DROP TABLE initially_not_replicated_reference_table;
-- reload pg_dist_shard_placement table -- reload pg_dist_shard_placement table
@ -794,15 +794,33 @@ DEBUG: push down of limit count: 3
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -819,6 +837,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning
RESET citus.enable_non_colocated_router_query_pushdown; RESET citus.enable_non_colocated_router_query_pushdown;
@ -1517,15 +1543,33 @@ DEBUG: router planner does not support queries that reference non-colocated dis
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -1542,6 +1586,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning
SELECT a.author_id as first_author, b.word_count as second_word_count SELECT a.author_id as first_author, b.word_count as second_word_count
@ -1652,11 +1704,6 @@ DETAIL: A command for a distributed function is run. To make sure subsequent co
SELECT 1 FROM authors_reference r JOIN ( SELECT 1 FROM authors_reference r JOIN (
SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid
) num_db ON (r.id = num_db.datid) LIMIT 1; ) num_db ON (r.id = num_db.datid) LIMIT 1;
DEBUG: found no worker with all shard placements
DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM multi_router_planner.number1() s(datid)
DEBUG: Creating router plan
DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid)))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 FROM (multi_router_planner.authors_reference r JOIN (SELECT intermediate_result.datid FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) num_db ON ((r.id OPERATOR(pg_catalog.=) num_db.datid))) LIMIT 1
DEBUG: Creating router plan DEBUG: Creating router plan
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1666,11 +1713,6 @@ DEBUG: Creating router plan
CREATE VIEW num_db AS CREATE VIEW num_db AS
SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid; SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid;
SELECT 1 FROM authors_reference r JOIN num_db ON (r.id = num_db.datid) LIMIT 1; SELECT 1 FROM authors_reference r JOIN num_db ON (r.id = num_db.datid) LIMIT 1;
DEBUG: found no worker with all shard placements
DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM multi_router_planner.number1() s(datid)
DEBUG: Creating router plan
DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid)))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 FROM (multi_router_planner.authors_reference r JOIN (SELECT intermediate_result.datid FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) num_db ON ((r.id OPERATOR(pg_catalog.=) num_db.datid))) LIMIT 1
DEBUG: Creating router plan DEBUG: Creating router plan
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1679,9 +1721,6 @@ DEBUG: Creating router plan
-- with a CTE in a view -- with a CTE in a view
WITH cte AS MATERIALIZED (SELECT * FROM num_db) WITH cte AS MATERIALIZED (SELECT * FROM num_db)
SELECT 1 FROM authors_reference r JOIN cte ON (r.id = cte.datid) LIMIT 1; SELECT 1 FROM authors_reference r JOIN cte ON (r.id = cte.datid) LIMIT 1;
DEBUG: found no worker with all shard placements
DEBUG: generating subplan XXX_1 for CTE cte: SELECT datid FROM (SELECT s.datid FROM (multi_router_planner.number1() s(datid) LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid)))) num_db
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT 1 FROM (multi_router_planner.authors_reference r JOIN (SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) cte ON ((r.id OPERATOR(pg_catalog.=) cte.datid))) LIMIT 1
DEBUG: Creating router plan DEBUG: Creating router plan
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -1897,15 +1936,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -1922,6 +1979,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20 DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id id | author_id | title | word_count | name | id
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -1935,15 +2000,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5 DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -1960,6 +2043,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20 DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20 DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 30
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id id | author_id | title | word_count | name | id
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -1993,15 +2084,33 @@ DEBUG: router planner does not support queries that reference non-colocated dis
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -2018,6 +2127,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20 DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id id | author_id | title | word_count | name | id
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -2030,15 +2147,33 @@ DEBUG: router planner does not support queries that reference non-colocated dis
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -2055,6 +2190,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20 DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 30
id | author_id | title | word_count | name | id id | author_id | title | word_count | name | id
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
@ -1153,7 +1153,12 @@ SELECT create_reference_table('schema_with_user.test_table');
SET citus.next_shard_id TO 1197000; SET citus.next_shard_id TO 1197000;
-- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock -- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock
DROP OWNED BY "test-user" CASCADE; DROP OWNED BY "test-user" CASCADE;
NOTICE: drop cascades to table schema_with_user.test_table NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table schema_with_user.test_table
drop cascades to table schema_with_user.test_table_1190039
NOTICE: schema "schema_with_user" does not exist, skipping
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
DROP USER "test-user"; DROP USER "test-user";
DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text);
-- test run_command_on_* UDFs with schema -- test run_command_on_* UDFs with schema
@ -9,13 +9,6 @@ SET citus.shard_replication_factor TO 1;
CREATE SCHEMA sequence_default; CREATE SCHEMA sequence_default;
SET search_path = sequence_default, public; SET search_path = sequence_default, public;
-- test both distributed and citus local tables -- test both distributed and citus local tables
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
-- Cannot add a column involving DEFAULT nextval('..') because the table is not empty -- Cannot add a column involving DEFAULT nextval('..') because the table is not empty
CREATE SEQUENCE seq_0; CREATE SEQUENCE seq_0;
CREATE SEQUENCE seq_0_local_table; CREATE SEQUENCE seq_0_local_table;
@ -891,10 +884,4 @@ DROP TABLE test_seq_dist;
DROP TABLE sequence_default.seq_test_7_par; DROP TABLE sequence_default.seq_test_7_par;
SET client_min_messages TO error; -- suppress cascading objects dropping SET client_min_messages TO error; -- suppress cascading objects dropping
DROP SCHEMA sequence_default CASCADE; DROP SCHEMA sequence_default CASCADE;
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
SET search_path TO public; SET search_path TO public;
@ -507,15 +507,33 @@ DEBUG: push down of limit count: 3
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -532,6 +550,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- but they can be executed via repartition join planner -- but they can be executed via repartition join planner
@ -545,15 +571,33 @@ DEBUG: push down of limit count: 3
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -570,6 +614,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 12
first_author | second_word_count first_author | second_word_count
--------------------------------------------------------------------- ---------------------------------------------------------------------
10 | 19519 10 | 19519
@ -655,15 +707,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 0 and 4
DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 1 and 4
DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 2 and 4
DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2 DEBUG: join prunable for task partitionId 3 and 2
DEBUG: join prunable for task partitionId 3 and 4
DEBUG: join prunable for task partitionId 3 and 5
DEBUG: join prunable for task partitionId 4 and 0
DEBUG: join prunable for task partitionId 4 and 1
DEBUG: join prunable for task partitionId 4 and 2
DEBUG: join prunable for task partitionId 4 and 3
DEBUG: join prunable for task partitionId 4 and 5
DEBUG: join prunable for task partitionId 5 and 0
DEBUG: join prunable for task partitionId 5 and 1
DEBUG: join prunable for task partitionId 5 and 2
DEBUG: join prunable for task partitionId 5 and 3
DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1 DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 2 DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2 DEBUG: pruning merge fetch taskId 2
@ -680,6 +750,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8 DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11 DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 12 DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 14
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 18
ERROR: the query contains a join that requires repartitioning ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- system columns from shard tables can be queried and retrieved -- system columns from shard tables can be queried and retrieved
@ -75,7 +75,7 @@ SELECT citus_table_size('customer_copy_hash'),
citus_table_size('supplier'); citus_table_size('supplier');
citus_table_size | citus_table_size | citus_table_size citus_table_size | citus_table_size | citus_table_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
548864 | 548864 | 442368 548864 | 548864 | 655360
(1 row) (1 row)
CREATE INDEX index_1 on customer_copy_hash(c_custkey); CREATE INDEX index_1 on customer_copy_hash(c_custkey);
@ -104,19 +104,19 @@ VACUUM (FULL) supplier;
SELECT citus_table_size('supplier'); SELECT citus_table_size('supplier');
citus_table_size citus_table_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
376832 565248
(1 row) (1 row)
SELECT citus_relation_size('supplier'); SELECT citus_relation_size('supplier');
citus_relation_size citus_relation_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
376832 565248
(1 row) (1 row)
SELECT citus_total_relation_size('supplier'); SELECT citus_total_relation_size('supplier');
citus_total_relation_size citus_total_relation_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
376832 565248
(1 row) (1 row)
CREATE INDEX index_2 on supplier(s_suppkey); CREATE INDEX index_2 on supplier(s_suppkey);
@ -124,19 +124,19 @@ VACUUM (FULL) supplier;
SELECT citus_table_size('supplier'); SELECT citus_table_size('supplier');
citus_table_size citus_table_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
376832 565248
(1 row) (1 row)
SELECT citus_relation_size('supplier'); SELECT citus_relation_size('supplier');
citus_relation_size citus_relation_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
376832 565248
(1 row) (1 row)
SELECT citus_total_relation_size('supplier'); SELECT citus_total_relation_size('supplier');
citus_total_relation_size citus_total_relation_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
458752 688128
(1 row) (1 row)
-- Test inside the transaction -- Test inside the transaction
@ -78,6 +78,12 @@ SELECT * FROM pg_dist_shard_placement;
DROP EXTENSION citus; DROP EXTENSION citus;
CREATE EXTENSION citus; CREATE EXTENSION citus;
-- re-add the nodes to the cluster -- re-add the nodes to the cluster
SELECT 1 FROM citus_set_coordinator_host('localhost');
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column? ?column?
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -986,20 +986,25 @@ SELECT create_distributed_table('test_colocated_table_1', 'id', colocate_with =>
(1 row) (1 row)
CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id)); CREATE TABLE test_colocated_table_2(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_colocated_table_2', 'id', colocate_with => 'test_colocated_table_1'); SELECT create_distributed_table('test_colocated_table_2', 'id', colocate_with => 'test_colocated_table_1');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int, FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id), FOREIGN KEY(id) REFERENCES test_colocated_table_1(id), FOREIGN KEY(id) REFERENCES test_colocated_table_2(id)); ALTER TABLE test_colocated_table_2 ADD FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id);
ALTER TABLE test_colocated_table_2 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_1(id);
CREATE TABLE test_colocated_table_3(id int PRIMARY KEY, value_1 int);
SELECT create_distributed_table('test_colocated_table_3', 'id', colocate_with => 'test_colocated_table_1'); SELECT create_distributed_table('test_colocated_table_3', 'id', colocate_with => 'test_colocated_table_1');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(value_1) REFERENCES test_reference_table_fkey(id);
ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_1(id);
ALTER TABLE test_colocated_table_3 ADD FOREIGN KEY(id) REFERENCES test_colocated_table_2(id);
INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i; INSERT INTO test_reference_table_fkey SELECT i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i; INSERT INTO test_colocated_table_1 SELECT i, i FROM generate_series (0, 100) i;
INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i; INSERT INTO test_colocated_table_2 SELECT i, i FROM generate_series (0, 100) i;
@ -1159,7 +1164,7 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupId=>0);
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass; SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'ref_table'::regclass;
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
2 3
(1 row) (1 row)
\c - mx_isolation_role_ent - :master_port \c - mx_isolation_role_ent - :master_port
@ -1275,3 +1275,9 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0;
TRUNCATE TABLE pg_catalog.pg_dist_colocation; TRUNCATE TABLE pg_catalog.pg_dist_colocation;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100;
ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id;
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
@ -1,16 +1,5 @@
-- Tests for prepared transaction recovery -- Tests for prepared transaction recovery
SET citus.next_shard_id TO 1220000; SET citus.next_shard_id TO 1220000;
-- reference tables can have placements on the coordinator. Add it so
-- verify we recover transactions which do DML on coordinator placements
-- properly.
SET client_min_messages TO ERROR;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
-- enforce 1 connection per placement since -- enforce 1 connection per placement since
-- the tests are prepared for that -- the tests are prepared for that
SET citus.force_max_query_parallelization TO ON; SET citus.force_max_query_parallelization TO ON;
@ -516,9 +505,3 @@ DROP TABLE test_recovery;
DROP TABLE test_recovery_single; DROP TABLE test_recovery_single;
DROP TABLE test_2pcskip; DROP TABLE test_2pcskip;
DROP TABLE test_reference; DROP TABLE test_reference;
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
@ -374,9 +374,10 @@ ORDER BY
shardid, nodename, nodeport; shardid, nodename, nodeport;
shardid | shardstate | nodename | nodeport shardid | shardstate | nodename | nodeport
--------------------------------------------------------------------- ---------------------------------------------------------------------
1410006 | 1 | localhost | 57636
1410006 | 1 | localhost | 57637 1410006 | 1 | localhost | 57637
1410006 | 1 | localhost | 57638 1410006 | 1 | localhost | 57638
(2 rows) (3 rows)
-- verify table is not dropped -- verify table is not dropped
\dt transactional_drop_reference \dt transactional_drop_reference
@ -670,13 +671,6 @@ ORDER BY
\c - - - :master_port \c - - - :master_port
SET client_min_messages TO WARNING; SET client_min_messages TO WARNING;
-- try using the coordinator as a worker and then dropping the table
SELECT 1 FROM master_add_node('localhost', :master_port, groupid := 0);
?column?
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE citus_local (id serial, k int); CREATE TABLE citus_local (id serial, k int);
SELECT create_distributed_table('citus_local', 'id'); SELECT create_distributed_table('citus_local', 'id');
create_distributed_table create_distributed_table
@ -686,12 +680,6 @@ SELECT create_distributed_table('citus_local', 'id');
INSERT INTO citus_local (k) VALUES (2); INSERT INTO citus_local (k) VALUES (2);
DROP TABLE citus_local; DROP TABLE citus_local;
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
-- clean the workspace -- clean the workspace
DROP TABLE transactional_drop_shards, transactional_drop_reference; DROP TABLE transactional_drop_shards, transactional_drop_reference;
-- test DROP TABLE as a non-superuser in a transaction block -- test DROP TABLE as a non-superuser in a transaction block
@ -432,8 +432,12 @@ CREATE TABLE dist(id int, ref_id int REFERENCES ref(id));
INSERT INTO dist SELECT x,x FROM generate_series(1,10000) x; INSERT INTO dist SELECT x,x FROM generate_series(1,10000) x;
-- test that we do not cascade truncates to local referencing tables -- test that we do not cascade truncates to local referencing tables
SELECT truncate_local_data_after_distributing_table('ref'); SELECT truncate_local_data_after_distributing_table('ref');
ERROR: cannot truncate a table referenced in a foreign key constraint by a local table NOTICE: truncate cascades to table "dist"
DETAIL: Table "dist" references "ref" truncate_local_data_after_distributing_table
---------------------------------------------------------------------
(1 row)
-- test that we allow distributing tables that have foreign keys to reference tables -- test that we allow distributing tables that have foreign keys to reference tables
SELECT create_distributed_table('dist','id'); SELECT create_distributed_table('dist','id');
NOTICE: Copying data from local table... NOTICE: Copying data from local table...
@ -465,7 +469,8 @@ SELECT * FROM table_sizes;
--------------------------------------------------------------------- ---------------------------------------------------------------------
dist | f dist | f
ref | f ref | f
(2 rows) ref_1210032 | t
(3 rows)
ROLLBACK; ROLLBACK;
-- the following should truncate dist table only -- the following should truncate dist table only
@ -480,8 +485,9 @@ SELECT * FROM table_sizes;
name | has_data name | has_data
--------------------------------------------------------------------- ---------------------------------------------------------------------
dist | f dist | f
ref | t ref | f
(2 rows) ref_1210032 | t
(3 rows)
ROLLBACK; ROLLBACK;
DROP TABLE ref, dist; DROP TABLE ref, dist;
@ -370,6 +370,8 @@ NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000 NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table -- should propagate to all workers because table is distributed table
VACUUM distributed_vacuum_table; VACUUM distributed_vacuum_table;
NOTICE: issuing VACUUM multi_utilities.distributed_vacuum_table_970001 NOTICE: issuing VACUUM multi_utilities.distributed_vacuum_table_970001
@ -382,12 +384,16 @@ NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000 NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_vacuum_table should propagate -- only reference_vacuum_table should propagate
VACUUM local_vacuum_table, reference_vacuum_table; VACUUM local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000 NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000 NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (disable_page_skipping) aggressively process pages of the relation, it does not respect visibility map -- vacuum (disable_page_skipping) aggressively process pages of the relation, it does not respect visibility map
VACUUM (DISABLE_PAGE_SKIPPING true) local_vacuum_table; VACUUM (DISABLE_PAGE_SKIPPING true) local_vacuum_table;
VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table;
@ -440,6 +446,8 @@ NOTICE: issuing VACUUM (ANALYZE) multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE) multi_utilities.reference_vacuum_table_970000 NOTICE: issuing VACUUM (ANALYZE) multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE) multi_utilities.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- give enough time for stats to be updated.(updated per 500ms by default) -- give enough time for stats to be updated.(updated per 500ms by default)
select pg_sleep(1); select pg_sleep(1);
pg_sleep pg_sleep
@ -499,6 +507,8 @@ NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002 NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table -- should propagate to all workers because table is distributed table
ANALYZE distributed_analyze_table; ANALYZE distributed_analyze_table;
NOTICE: issuing ANALYZE multi_utilities.distributed_analyze_table_970003 NOTICE: issuing ANALYZE multi_utilities.distributed_analyze_table_970003
@ -511,12 +521,16 @@ NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002 NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_analyze_table should propagate -- only reference_analyze_table should propagate
ANALYZE local_analyze_table, reference_analyze_table; ANALYZE local_analyze_table, reference_analyze_table;
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002 NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002 NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE multi_utilities.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because ddl propagation is disabled -- should not propagate because ddl propagation is disabled
SET citus.enable_ddl_propagation TO OFF; SET citus.enable_ddl_propagation TO OFF;
ANALYZE distributed_analyze_table; ANALYZE distributed_analyze_table;
@ -404,13 +404,6 @@ where val = 'asdf';
3 3
(1 row) (1 row)
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
BEGIN; BEGIN;
CREATE TABLE generated_stored_col_test (x int, y int generated always as (x+1) stored); CREATE TABLE generated_stored_col_test (x int, y int generated always as (x+1) stored);
SELECT citus_add_local_table_to_metadata('generated_stored_col_test'); SELECT citus_add_local_table_to_metadata('generated_stored_col_test');
@ -639,12 +632,6 @@ NOTICE: renaming the new table to test_pg12.generated_stored_ref
(4 rows) (4 rows)
ROLLBACK; ROLLBACK;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
CREATE TABLE superuser_columnar_table (a int) USING columnar; CREATE TABLE superuser_columnar_table (a int) USING columnar;
CREATE USER read_access; CREATE USER read_access;
SET ROLE read_access; SET ROLE read_access;
@ -1332,12 +1332,6 @@ set client_min_messages to error;
drop schema pg14 cascade; drop schema pg14 cascade;
create schema pg14; create schema pg14;
set search_path to pg14; set search_path to pg14;
select 1 from citus_add_node('localhost',:master_port,groupid=>0);
?column?
---------------------------------------------------------------------
1
(1 row)
-- test adding foreign table to metadata with the guc -- test adding foreign table to metadata with the guc
-- will test truncating foreign tables later -- will test truncating foreign tables later
CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial); CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial);
@ -1505,9 +1499,3 @@ set client_min_messages to error;
drop extension postgres_fdw cascade; drop extension postgres_fdw cascade;
drop schema pg14 cascade; drop schema pg14 cascade;
reset client_min_messages; reset client_min_messages;
select 1 from citus_remove_node('localhost',:master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
@ -218,6 +218,9 @@ BEGIN;
NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to column col_3 of table generated_stored_ref DETAIL: drop cascades to column col_3 of table generated_stored_ref
drop cascades to column col_5 of table generated_stored_ref drop cascades to column col_5 of table generated_stored_ref
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to column col_3 of table generated_stored_ref_960016
drop cascades to column col_5 of table generated_stored_ref_960016
ALTER TABLE generated_stored_ref DROP COLUMN col_4; ALTER TABLE generated_stored_ref DROP COLUMN col_4;
-- show that undistribute_table works fine -- show that undistribute_table works fine
SELECT undistribute_table('generated_stored_ref'); SELECT undistribute_table('generated_stored_ref');
@ -269,15 +272,6 @@ CREATE TABLE tbl2
-- on local tables works fine -- on local tables works fine
MERGE INTO tbl1 USING tbl2 ON (true) MERGE INTO tbl1 USING tbl2 ON (true)
WHEN MATCHED THEN DELETE; WHEN MATCHED THEN DELETE;
-- add coordinator node as a worker
SET client_min_messages to ERROR;
SELECT 1 FROM master_add_node('localhost', :master_port, groupId => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
RESET client_min_messages;
-- one table is Citus local table, fails -- one table is Citus local table, fails
SELECT citus_add_local_table_to_metadata('tbl1'); SELECT citus_add_local_table_to_metadata('tbl1');
citus_add_local_table_to_metadata citus_add_local_table_to_metadata
@ -398,12 +392,6 @@ SET search_path TO pg15;
SET client_min_messages to ERROR; SET client_min_messages to ERROR;
DROP TABLE FKTABLE_local, PKTABLE_local; DROP TABLE FKTABLE_local, PKTABLE_local;
RESET client_min_messages; RESET client_min_messages;
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
SELECT create_distributed_table('tbl1', 'x'); SELECT create_distributed_table('tbl1', 'x');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -880,8 +868,8 @@ SELECT create_reference_table('FKTABLE');
SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid; SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid;
pg_get_constraintdef pg_get_constraintdef
--------------------------------------------------------------------- ---------------------------------------------------------------------
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default) FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default)
FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null)
(2 rows) (2 rows)
\c - - - :worker_1_port \c - - - :worker_1_port
@ -1274,6 +1262,7 @@ SELECT create_reference_table('set_on_default_test_referenced');
(1 row) (1 row)
-- should error since col_3 defaults to a sequence
CREATE TABLE set_on_default_test_referencing( CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int, col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3) FOREIGN KEY(col_1, col_3)
@ -1281,10 +1270,7 @@ CREATE TABLE set_on_default_test_referencing(
ON DELETE SET DEFAULT (col_1) ON DELETE SET DEFAULT (col_1)
ON UPDATE SET DEFAULT ON UPDATE SET DEFAULT
); );
-- should error since col_3 defaults to a sequence
SELECT create_reference_table('set_on_default_test_referencing');
ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences ERROR: cannot create foreign key constraint since Citus does not support ON DELETE / UPDATE SET DEFAULT actions on the columns that default to sequences
DROP TABLE set_on_default_test_referencing;
CREATE TABLE set_on_default_test_referencing( CREATE TABLE set_on_default_test_referencing(
col_1 int, col_2 int, col_3 serial, col_4 int, col_1 int, col_2 int, col_3 serial, col_4 int,
FOREIGN KEY(col_1, col_3) FOREIGN KEY(col_1, col_3)
@ -1447,12 +1433,6 @@ NOTICE: renaming the new table to pg15.foreign_table_test
(1 row) (1 row)
SELECT 1 FROM citus_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
DROP SERVER foreign_server CASCADE; DROP SERVER foreign_server CASCADE;
NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects
-- PG15 now supports specifying oid on CREATE DATABASE -- PG15 now supports specifying oid on CREATE DATABASE
@ -15,13 +15,6 @@ SET search_path TO pgmerge_schema;
SET citus.use_citus_managed_tables to true; SET citus.use_citus_managed_tables to true;
\set SHOW_CONTEXT errors \set SHOW_CONTEXT errors
SET citus.next_shard_id TO 4001000; SET citus.next_shard_id TO 4001000;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
CREATE USER regress_merge_privs; CREATE USER regress_merge_privs;
CREATE USER regress_merge_no_privs; CREATE USER regress_merge_no_privs;
DROP TABLE IF EXISTS target; DROP TABLE IF EXISTS target;
@ -2133,9 +2126,3 @@ drop cascades to table source2
drop cascades to function merge_trigfunc() drop cascades to function merge_trigfunc()
DROP USER regress_merge_privs; DROP USER regress_merge_privs;
DROP USER regress_merge_no_privs; DROP USER regress_merge_no_privs;
SELECT 1 FROM master_remove_node('localhost', :master_port);
?column?
---------------------------------------------------------------------
1
(1 row)
@ -30,13 +30,6 @@ CREATE FOREIGN TABLE foreign_table (
) )
SERVER foreign_server_dependent_schema SERVER foreign_server_dependent_schema
OPTIONS (schema_name 'test_dependent_schema', table_name 'foreign_table_test'); OPTIONS (schema_name 'test_dependent_schema', table_name 'foreign_table_test');
SELECT 1 FROM citus_add_node('localhost', :master_port, groupId=>0);
NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata
?column?
---------------------------------------------------------------------
1
(1 row)
-- verify that the aggregate is propagated to the new node -- verify that the aggregate is propagated to the new node
SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%propagate_foreign_server.array_agg%';$$); SELECT run_command_on_workers($$select aggfnoid from pg_aggregate where aggfnoid::text like '%propagate_foreign_server.array_agg%';$$);
run_command_on_workers run_command_on_workers
@ -2,13 +2,6 @@ CREATE SCHEMA publication;
CREATE SCHEMA "publication-1"; CREATE SCHEMA "publication-1";
SET search_path TO publication; SET search_path TO publication;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
-- for citus_add_local_table_to_metadata / create_distributed_table_concurrently
SELECT citus_set_coordinator_host('localhost', :master_port);
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION activate_node_snapshot() CREATE OR REPLACE FUNCTION activate_node_snapshot()
RETURNS text[] RETURNS text[]
LANGUAGE C STRICT LANGUAGE C STRICT
@ -264,7 +257,6 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE; DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA "publication-1" CASCADE;
SELECT citus_remove_node('localhost', :master_port);
\q \q
\endif \endif
-- recreate a mixed publication -- recreate a mixed publication
@ -371,9 +363,3 @@ DROP PUBLICATION pubpartitioned;
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE; DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA "publication-1" CASCADE;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -2,13 +2,6 @@ CREATE SCHEMA publication;
CREATE SCHEMA "publication-1"; CREATE SCHEMA "publication-1";
SET search_path TO publication; SET search_path TO publication;
SET citus.shard_replication_factor TO 1; SET citus.shard_replication_factor TO 1;
-- for citus_add_local_table_to_metadata / create_distributed_table_concurrently
SELECT citus_set_coordinator_host('localhost', :master_port);
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
CREATE OR REPLACE FUNCTION activate_node_snapshot() CREATE OR REPLACE FUNCTION activate_node_snapshot()
RETURNS text[] RETURNS text[]
LANGUAGE C STRICT LANGUAGE C STRICT
@ -264,10 +257,4 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
DROP SCHEMA publication CASCADE; DROP SCHEMA publication CASCADE;
DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA "publication-1" CASCADE;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
\q \q
@ -2,13 +2,6 @@ CREATE SCHEMA query_single_shard_table;
SET search_path TO query_single_shard_table; SET search_path TO query_single_shard_table;
SET citus.next_shard_id TO 1620000; SET citus.next_shard_id TO 1620000;
SET citus.shard_count TO 32; SET citus.shard_count TO 32;
SET client_min_messages TO WARNING;
SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
SET client_min_messages TO NOTICE; SET client_min_messages TO NOTICE;
CREATE TABLE nullkey_c1_t1(a int, b int); CREATE TABLE nullkey_c1_t1(a int, b int);
CREATE TABLE nullkey_c1_t2(a int, b int); CREATE TABLE nullkey_c1_t2(a int, b int);
@ -1879,9 +1872,3 @@ DEBUG: Creating router plan
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
DROP SCHEMA query_single_shard_table CASCADE; DROP SCHEMA query_single_shard_table CASCADE;
SELECT citus_remove_node('localhost', :master_port);
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -2,14 +2,6 @@ CREATE SCHEMA recurring_outer_join;
SET search_path TO recurring_outer_join; SET search_path TO recurring_outer_join;
SET citus.next_shard_id TO 1520000; SET citus.next_shard_id TO 1520000;
SET citus.shard_count TO 32; SET citus.shard_count TO 32;
-- idempotently add node to allow this test to run without add_coordinator
SET client_min_messages TO WARNING;
SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0);
?column?
---------------------------------------------------------------------
1
(1 row)
SET client_min_messages TO DEBUG1; SET client_min_messages TO DEBUG1;
CREATE TABLE dist_1 (a int, b int); CREATE TABLE dist_1 (a int, b int);
SELECT create_distributed_table('dist_1', 'a'); SELECT create_distributed_table('dist_1', 'a');
@ -2012,9 +2004,3 @@ DEBUG: performing repartitioned INSERT ... SELECT
ROLLBACK; ROLLBACK;
SET client_min_messages TO ERROR; SET client_min_messages TO ERROR;
DROP SCHEMA recurring_outer_join CASCADE; DROP SCHEMA recurring_outer_join CASCADE;
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
@ -71,10 +71,6 @@ UPDATE distributed_table SET dept = foo.max_dept FROM
) as foo WHERE foo.max_dept >= dept and tenant_id = '8'; ) as foo WHERE foo.max_dept >= dept and tenant_id = '8';
DEBUG: generating subplan XXX_1 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT second_distributed_table.tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) DEBUG: generating subplan XXX_1 for subquery SELECT max(dept) AS max_dept FROM (SELECT DISTINCT distributed_table_1.tenant_id, distributed_table_1.dept FROM recursive_dml_with_different_planner_executors.distributed_table distributed_table_1) distributed_table WHERE (tenant_id OPERATOR(pg_catalog.=) ANY (SELECT second_distributed_table.tenant_id FROM recursive_dml_with_different_planner_executors.second_distributed_table WHERE (second_distributed_table.dept OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))))
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE ((foo.max_dept OPERATOR(pg_catalog.>=) distributed_table.dept) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) '8'::text)) DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE recursive_dml_with_different_planner_executors.distributed_table SET dept = foo.max_dept FROM (SELECT intermediate_result.max_dept FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(max_dept integer)) foo WHERE ((foo.max_dept OPERATOR(pg_catalog.>=) distributed_table.dept) AND (distributed_table.tenant_id OPERATOR(pg_catalog.=) '8'::text))
RESET client_min_messages; SET client_min_messages TO WARNING;
DROP SCHEMA recursive_dml_with_different_planner_executors CASCADE; DROP SCHEMA recursive_dml_with_different_planner_executors CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table distributed_table
drop cascades to table second_distributed_table
drop cascades to table reference_table
SET search_path TO public; SET search_path TO public;
@ -491,18 +491,14 @@ SELECT MAX(x) FROM (
UNION ALL UNION ALL
SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE b > 0) AS s1 WHERE false SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE b > 0) AS s1 WHERE false
) as res; ) as res;
DEBUG: Wrapping relation "tbl2" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT b FROM push_down_filters.tbl2 WHERE (b OPERATOR(pg_catalog.>) 0)
DEBUG: Wrapping relation "tbl2" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT b FROM push_down_filters.tbl2 WHERE false
DEBUG: generating subplan XXX_3 for subquery SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT tbl2_1.b FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE true UNION ALL SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT tbl2_1.b FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE false
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max(x) AS max FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) res
max max
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
(1 row) (1 row)
DROP TABLE tbl1, tbl2; DROP TABLE tbl1, tbl2;
CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false)"
PL/pgSQL function citus_drop_trigger() line XX at PERFORM
CREATE table tbl2(a int, b int, d int); CREATE table tbl2(a int, b int, d int);
CREATE table tbl1(a int, b int, c int); CREATE table tbl1(a int, b int, c int);
INSERT INTO tbl1 VALUES (1,1,1); INSERT INTO tbl1 VALUES (1,1,1);
@ -563,12 +559,6 @@ SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE tbl2.b > 0) AS s1 WHERE true
UNION ALL UNION ALL
SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE tbl2.b > 0) AS s1 WHERE false SELECT 1 as x FROM (SELECT 1 FROM tbl1, tbl2 WHERE tbl2.b > 0) AS s1 WHERE false
) as res; ) as res;
DEBUG: Wrapping relation "tbl2" to a subquery
DEBUG: generating subplan XXX_1 for subquery SELECT b FROM push_down_filters.tbl2 WHERE (b OPERATOR(pg_catalog.>) 0)
DEBUG: Wrapping relation "tbl2" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT b FROM push_down_filters.tbl2 WHERE false
DEBUG: generating subplan XXX_3 for subquery SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT NULL::integer AS a, tbl2_1.b, NULL::integer AS d FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE true UNION ALL SELECT 1 AS x FROM (SELECT 1 FROM push_down_filters.tbl1, (SELECT NULL::integer AS a, tbl2_1.b, NULL::integer AS d FROM (SELECT intermediate_result.b FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(b integer)) tbl2_1) tbl2 WHERE (tbl2.b OPERATOR(pg_catalog.>) 0)) s1("?column?") WHERE false
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max(x) AS max FROM (SELECT intermediate_result.x FROM read_intermediate_result('XXX_3'::text, 'binary'::citus_copy_format) intermediate_result(x integer)) res
max max
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 1
@ -577,4 +567,4 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT max(x) AS max
\set VERBOSITY terse \set VERBOSITY terse
RESET client_min_messages; RESET client_min_messages;
DROP SCHEMA push_down_filters CASCADE; DROP SCHEMA push_down_filters CASCADE;
NOTICE: drop cascades to 7 other objects NOTICE: drop cascades to 8 other objects
@ -152,8 +152,11 @@ SELECT ref_table.* FROM ref_table JOIN (SELECT * FROM recursive_defined_non_recu
(3 rows) (3 rows)
SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM local_table l WHERE l.a = ref_table.a); SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM local_table l WHERE l.a = ref_table.a);
ERROR: direct joins between distributed and local tables are not supported a | b
HINT: Use CTE's or subqueries to select from local tables and use them in joins ---------------------------------------------------------------------
1 | 1
(1 row)
SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM local_table l WHERE l.a = ref_table.a) AND false; SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM local_table l WHERE l.a = ref_table.a) AND false;
a | b a | b
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -196,9 +199,5 @@ SELECT ref_table.* FROM ref_table WHERE EXISTS (SELECT * FROM recursive_defined_
--------------------------------------------------------------------- ---------------------------------------------------------------------
(0 rows) (0 rows)
SET client_min_messages TO WARNING;
DROP SCHEMA postgres_local_table CASCADE; DROP SCHEMA postgres_local_table CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table local_table
drop cascades to view recursive_view
drop cascades to view recursive_defined_non_recursive_view
drop cascades to table ref_table
@ -1020,20 +1020,6 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
(1 row) (1 row)
COMMIT; COMMIT;
SET client_min_messages TO WARNING;
SET search_path TO 'public'; SET search_path TO 'public';
DROP SCHEMA access_tracking CASCADE; DROP SCHEMA access_tracking CASCADE;
NOTICE: drop cascades to 14 other objects
DETAIL: drop cascades to function access_tracking.relation_select_access_mode(oid)
drop cascades to function access_tracking.relation_dml_access_mode(oid)
drop cascades to function access_tracking.relation_ddl_access_mode(oid)
drop cascades to function access_tracking.distributed_relation(text)
drop cascades to function access_tracking.relation_access_mode_to_text(text,integer)
drop cascades to view access_tracking.relation_accesses
drop cascades to table access_tracking.table_1
drop cascades to table access_tracking.table_2
drop cascades to table access_tracking.table_4
drop cascades to table access_tracking.table_5
drop cascades to table access_tracking.table_6
drop cascades to table access_tracking.table_7
drop cascades to table access_tracking.partitioning_test
drop cascades to table access_tracking.table_3
@ -5,3 +5,10 @@ SELECT master_remove_node('localhost', :master_port);
(1 row) (1 row)
-- restore coordinator for the rest of the tests
SELECT citus_set_coordinator_host('localhost', :master_port);
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
@ -0,0 +1,6 @@
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
@ -38,7 +38,7 @@ SELECT count(*) FROM pg_dist_placement p JOIN pg_dist_node n USING(groupid)
AND p.shardid IN (101500, 101501, 101502); AND p.shardid IN (101500, 101501, 101502);
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
3 4
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
@ -47,7 +47,7 @@ SELECT count(*) FROM pg_dist_placement p JOIN pg_dist_node n USING(groupid)
AND p.shardid IN (101500, 101501, 101502); AND p.shardid IN (101500, 101501, 101502);
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
3 4
(1 row) (1 row)
SET search_path TO disable_node_with_replicated_tables; SET search_path TO disable_node_with_replicated_tables;
@ -1,5 +1,11 @@
CREATE SCHEMA run_command_on_all_nodes; CREATE SCHEMA run_command_on_all_nodes;
SET search_path TO run_command_on_all_nodes; SET search_path TO run_command_on_all_nodes;
SELECT master_remove_node('localhost', :master_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
-- check coordinator isn't in metadata -- check coordinator isn't in metadata
SELECT count(*) != 0 AS "Coordinator is in Metadata" SELECT count(*) != 0 AS "Coordinator is in Metadata"
FROM pg_dist_node FROM pg_dist_node
@ -205,3 +211,9 @@ DROP SCHEMA run_command_on_all_nodes CASCADE;
NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table run_command_on_all_nodes.tbl DETAIL: drop cascades to table run_command_on_all_nodes.tbl
drop cascades to table run_command_on_all_nodes.test drop cascades to table run_command_on_all_nodes.test
SELECT citus_set_coordinator_host('localhost');
citus_set_coordinator_host
---------------------------------------------------------------------
(1 row)
@ -22,7 +22,7 @@ $$
DECLARE DECLARE
result bool; result bool;
BEGIN BEGIN
SELECT tx_count = worker_count FROM (SELECT count(*) as tx_count FROM pg_dist_transaction WHERE gid LIKE 'citus_%_' || pg_backend_pid() || '%_%') as s1, (SELECT count(*) as worker_count FROM pg_dist_node WHERE noderole = 'primary') as s2 INTO result; SELECT tx_count = worker_count FROM (SELECT count(*) as tx_count FROM pg_dist_transaction WHERE gid LIKE 'citus_%_' || pg_backend_pid() || '%_%') as s1, (SELECT count(*) as worker_count FROM pg_dist_node WHERE noderole = 'primary' AND groupid <> 0 ) as s2 INTO result;
RETURN result; RETURN result;
END; END;
$$ $$
@ -669,13 +669,14 @@ HINT: To remove the local data, run: SELECT truncate_local_data_after_distribut
ABORT; ABORT;
SET search_path TO 'public'; SET search_path TO 'public';
DROP SCHEMA test_seq_ddl CASCADE; DROP SCHEMA test_seq_ddl CASCADE;
NOTICE: drop cascades to 11 other objects NOTICE: drop cascades to 12 other objects
DETAIL: drop cascades to function test_seq_ddl.distributed_2pcs_are_equal_to_worker_count() DETAIL: drop cascades to function test_seq_ddl.distributed_2pcs_are_equal_to_worker_count()
drop cascades to function test_seq_ddl.distributed_2pcs_are_equal_to_placement_count() drop cascades to function test_seq_ddl.distributed_2pcs_are_equal_to_placement_count()
drop cascades to function test_seq_ddl.no_distributed_2pcs() drop cascades to function test_seq_ddl.no_distributed_2pcs()
drop cascades to function test_seq_ddl.set_local_multi_shard_modify_mode_to_sequential() drop cascades to function test_seq_ddl.set_local_multi_shard_modify_mode_to_sequential()
drop cascades to table test_seq_ddl.test_table drop cascades to table test_seq_ddl.test_table
drop cascades to table test_seq_ddl.ref_test drop cascades to table test_seq_ddl.ref_test
drop cascades to table test_seq_ddl.ref_test_16004
drop cascades to table test_seq_ddl.test_table_rep_2 drop cascades to table test_seq_ddl.test_table_rep_2
drop cascades to table test_seq_ddl.test_seq_truncate drop cascades to table test_seq_ddl.test_seq_truncate
drop cascades to table test_seq_ddl.test_seq_truncate_rep_2 drop cascades to table test_seq_ddl.test_seq_truncate_rep_2
@@ -321,15 +321,33 @@ DEBUG: push down of limit count: 2
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -346,6 +364,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_set_local.test t1, recursive_set_local.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 2
DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_set_local.local_test
DEBUG: Router planner cannot handle multi-shard select queries
@@ -360,9 +386,5 @@ DEBUG: Creating router plan
1
(2 rows)
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_set_local CASCADE;
-NOTICE: drop cascades to 3 other objects
-DETAIL: drop cascades to table test
-drop cascades to table ref
-drop cascades to table local_test

View File

@@ -916,15 +916,33 @@ DEBUG: push down of limit count: 0
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -941,6 +959,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y) LIMIT 0
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_union.test
@@ -957,15 +983,33 @@ DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
@@ -982,6 +1026,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 30
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
DEBUG: generating subplan XXX_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x OPERATOR(pg_catalog.=) t2.y)
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_2 for subquery SELECT x FROM recursive_union.test
@@ -1098,12 +1150,5 @@ DEBUG: Creating router plan
2 | 2
(2 rows)
-RESET client_min_messages;
+SET client_min_messages TO WARNING;
DROP SCHEMA recursive_union CASCADE;
-NOTICE: drop cascades to 6 other objects
-DETAIL: drop cascades to table test
-drop cascades to table ref
-drop cascades to table test_not_colocated
-drop cascades to view set_view_recursive
-drop cascades to view set_view_pushdown
-drop cascades to view set_view_recursive_second

View File

@@ -472,12 +472,13 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
(1 row)
DROP SCHEMA "shard Move Fkeys Indexes" CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 8 other objects
DETAIL: drop cascades to extension btree_gist
drop cascades to table "shard Move Fkeys Indexes".sensors
drop cascades to table "shard Move Fkeys Indexes".colocated_dist_table
drop cascades to table "shard Move Fkeys Indexes".colocated_partitioned_table
drop cascades to table "shard Move Fkeys Indexes".reference_table
+drop cascades to table "shard Move Fkeys Indexes".reference_table_8970028
drop cascades to table "shard Move Fkeys Indexes".index_backed_rep_identity
drop cascades to table "shard Move Fkeys Indexes".multiple_unique_keys
DROP ROLE mx_rebalancer_role_ent;

View File

@@ -358,10 +358,11 @@ ALTER TABLE sensors_2020_01_01 DROP CONSTRAINT fkey_from_child_to_child;
-- cleanup
\c - postgres - :master_port
DROP SCHEMA "blocking shard Move Fkeys Indexes" CASCADE;
-NOTICE: drop cascades to 5 other objects
+NOTICE: drop cascades to 6 other objects
DETAIL: drop cascades to table "blocking shard Move Fkeys Indexes".sensors
drop cascades to table "blocking shard Move Fkeys Indexes".colocated_dist_table
drop cascades to table "blocking shard Move Fkeys Indexes".colocated_partitioned_table
drop cascades to table "blocking shard Move Fkeys Indexes".reference_table
+drop cascades to table "blocking shard Move Fkeys Indexes".reference_table_8970028
drop cascades to table "blocking shard Move Fkeys Indexes".index_backed_rep_identity
DROP ROLE mx_rebalancer_blocking_role_ent;

View File

@@ -196,15 +196,33 @@ DETAIL: Creating dependency on merge taskId 20
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 0 and 4
+DEBUG: join prunable for task partitionId 0 and 5
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 1 and 4
+DEBUG: join prunable for task partitionId 1 and 5
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 2 and 4
+DEBUG: join prunable for task partitionId 2 and 5
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: join prunable for task partitionId 3 and 4
+DEBUG: join prunable for task partitionId 3 and 5
+DEBUG: join prunable for task partitionId 4 and 0
+DEBUG: join prunable for task partitionId 4 and 1
+DEBUG: join prunable for task partitionId 4 and 2
+DEBUG: join prunable for task partitionId 4 and 3
+DEBUG: join prunable for task partitionId 4 and 5
+DEBUG: join prunable for task partitionId 5 and 0
+DEBUG: join prunable for task partitionId 5 and 1
+DEBUG: join prunable for task partitionId 5 and 2
+DEBUG: join prunable for task partitionId 5 and 3
+DEBUG: join prunable for task partitionId 5 and 4
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
@@ -221,6 +239,14 @@ DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 24
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
+DEBUG: pruning merge fetch taskId 13
+DETAIL: Creating dependency on merge taskId 29
+DEBUG: pruning merge fetch taskId 14
+DETAIL: Creating dependency on merge taskId 25
+DEBUG: pruning merge fetch taskId 16
+DETAIL: Creating dependency on merge taskId 34
+DEBUG: pruning merge fetch taskId 17
+DETAIL: Creating dependency on merge taskId 30
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- single hash repartitioning is not supported between different column types

View File

@@ -718,15 +718,6 @@ END;
$$;
ERROR: (3/3) failed to execute one of the tasks
CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
-SET client_min_messages TO DEFAULT;
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_and_ctes CASCADE;
-NOTICE: drop cascades to 8 other objects
-DETAIL: drop cascades to table users_table
-drop cascades to table events_table
-drop cascades to table users_table_local
-drop cascades to table dist_table
-drop cascades to function func()
-drop cascades to table ref_table_1
-drop cascades to table ref_table_2
-drop cascades to table dist
SET search_path TO public;

View File

@@ -224,7 +224,5 @@ SELECT count(*) FROM append_table WHERE extra = 1;
UPDATE append_table a sET extra = 1 FROM append_table b WHERE a.key = b.key;
ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
END;
+SET client_min_messages TO WARNING;
DROP SCHEMA subquery_append CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table append_table
-drop cascades to table ref_table

Some files were not shown because too many files have changed in this diff