From f74447b3b7f024ecc88294a0fff923d056692b00 Mon Sep 17 00:00:00 2001
From: Halil Ozan Akgul
Date: Tue, 9 Mar 2021 10:43:43 +0300
Subject: [PATCH] Creates new colocation for colocate_with:='none' too

---
 .../commands/create_distributed_table.c       |   4 +-
 .../commands/drop_distributed_table.c         |   5 +
 .../distributed/metadata/metadata_cache.c     |  39 +++++
 src/include/distributed/metadata_cache.h      |   1 +
 .../expected/drop_partitioned_table.out       |   4 +
 ...ure_create_distributed_table_non_empty.out |  13 +-
 .../regress/expected/failure_create_table.out |  25 ++-
 .../regress/expected/function_propagation.out |   9 +-
 ...lation_ensure_dependency_activate_node.out |  64 ++++++-
 ..._ref_update_delete_upsert_vs_all_on_mx.out |  34 +---
 .../expected/multi_colocation_utils.out       | 163 ++++++++++++++----
 .../multi_fix_partition_shard_index_names.out |   1 +
 .../regress/expected/multi_metadata_sync.out  |  13 +-
 .../multi_mx_alter_distributed_table.out      |  30 ++--
 .../expected/multi_mx_create_table.out        |  47 ++---
 .../expected/start_stop_metadata_sync.out     |  13 +-
 ...ation_ensure_dependency_activate_node.spec |  16 +-
 ...ref_update_delete_upsert_vs_all_on_mx.spec |   2 +-
 src/test/regress/sql/function_propagation.sql |   1 +
 .../regress/sql/multi_colocation_utils.sql    |  39 ++++-
 .../multi_fix_partition_shard_index_names.sql |   2 +
 src/test/regress/sql/multi_metadata_sync.sql  |   1 +
 .../regress/sql/multi_mx_create_table.sql     |   1 +
 .../regress/sql/start_stop_metadata_sync.sql  |   1 +
 24 files changed, 384 insertions(+), 144 deletions(-)

diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c
index 26a905f23..62bcbb14b 100644
--- a/src/backend/distributed/commands/create_distributed_table.c
+++ b/src/backend/distributed/commands/create_distributed_table.c
@@ -959,7 +959,9 @@ ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
 	}
 	else if (IsColocateWithNone(colocateWithTableName))
 	{
-		colocationId = GetNextColocationId();
+		colocationId = CreateColocationGroup(shardCount, ShardReplicationFactor,
+											 distributionColumnType,
+											 distributionColumnCollation);
 		createdColocationGroup = true;
 	}
 
diff --git a/src/backend/distributed/commands/drop_distributed_table.c b/src/backend/distributed/commands/drop_distributed_table.c
index d4718aab8..1e214adf3 100644
--- a/src/backend/distributed/commands/drop_distributed_table.c
+++ b/src/backend/distributed/commands/drop_distributed_table.c
@@ -12,6 +12,7 @@
 
 #include "miscadmin.h"
 
+#include "distributed/colocation_utils.h"
 #include "distributed/commands/utility_hook.h"
 #include "distributed/commands.h"
 #include "distributed/metadata_utility.h"
@@ -70,6 +71,8 @@ master_remove_partition_metadata(PG_FUNCTION_ARGS)
 	char *schemaName = text_to_cstring(schemaNameText);
 	char *tableName = text_to_cstring(tableNameText);
 
+	uint32 colocationId = ColocationIdViaCatalog(relationId);
+
 	/*
 	 * The SQL_DROP trigger calls this function even for tables that are
 	 * not distributed. In that case, silently ignore. This is not very
@@ -87,6 +90,8 @@ master_remove_partition_metadata(PG_FUNCTION_ARGS)
 
 	DeletePartitionRow(relationId);
 
+	DeleteColocationGroupIfNoTablesBelong(colocationId);
+
 	PG_RETURN_VOID();
 }
 
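The behavioral change in ColocationIdForNewTable above is easiest to see from SQL: colocate_with => 'none' used to only reserve a fresh id via GetNextColocationId(), leaving no row in pg_dist_colocation, whereas CreateColocationGroup() now records the group up front, and the new DeleteColocationGroupIfNoTablesBelong() call in the drop path removes it once its last table is gone. A minimal sketch of the resulting behavior (hypothetical table name; assumes a running Citus cluster and psql for \gset):

CREATE TABLE standalone (a int);
SELECT create_distributed_table('standalone', 'a', colocate_with := 'none');
SELECT colocationid FROM pg_dist_partition
WHERE logicalrelid = 'standalone'::regclass \gset
-- the group exists as a real metadata row, not just a reserved id
SELECT count(*) FROM pg_dist_colocation WHERE colocationid = :colocationid;  -- expect 1
-- dropping the group's only member also deletes the group
DROP TABLE standalone;
SELECT count(*) FROM pg_dist_colocation WHERE colocationid = :colocationid;  -- expect 0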
diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c
index af80df0c4..1aab6ca88 100644
--- a/src/backend/distributed/metadata/metadata_cache.c
+++ b/src/backend/distributed/metadata/metadata_cache.c
@@ -596,6 +596,45 @@ PartitionColumnViaCatalog(Oid relationId)
 }
 
 
+/*
+ * ColocationIdViaCatalog returns the colocation id of the given relation
+ * by reading pg_dist_partition directly from the catalog.
+ */
+uint32
+ColocationIdViaCatalog(Oid relationId)
+{
+	HeapTuple partitionTuple = PgDistPartitionTupleViaCatalog(relationId);
+	if (!HeapTupleIsValid(partitionTuple))
+	{
+		return INVALID_COLOCATION_ID;
+	}
+
+	Datum datumArray[Natts_pg_dist_partition];
+	bool isNullArray[Natts_pg_dist_partition];
+
+	Relation pgDistPartition = table_open(DistPartitionRelationId(), AccessShareLock);
+
+	TupleDesc tupleDescriptor = RelationGetDescr(pgDistPartition);
+	heap_deform_tuple(partitionTuple, tupleDescriptor, datumArray, isNullArray);
+
+	if (isNullArray[Anum_pg_dist_partition_colocationid - 1])
+	{
+		/* the colocation id should never be NULL, but be defensive anyway */
+		heap_freetuple(partitionTuple);
+		table_close(pgDistPartition, NoLock);
+		return INVALID_COLOCATION_ID;
+	}
+
+	Datum colocationIdDatum = datumArray[Anum_pg_dist_partition_colocationid - 1];
+	uint32 colocationId = DatumGetUInt32(colocationIdDatum);
+
+	heap_freetuple(partitionTuple);
+	table_close(pgDistPartition, NoLock);
+
+	return colocationId;
+}
+
+
 /*
  * PgDistPartitionTupleViaCatalog is a helper function that searches
  * pg_dist_partition for the given relationId. The caller is responsible
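ColocationIdViaCatalog reads pg_dist_partition through the bare catalog APIs rather than through Citus' metadata cache, presumably because its caller runs from the SQL_DROP trigger, where relying on cached state for a relation that is mid-drop would be fragile. What it fetches is roughly the following (an SQL paraphrase for orientation, not what the C code literally executes):

SELECT colocationid
FROM pg_catalog.pg_dist_partition
WHERE logicalrelid = 'some_table'::regclass;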
diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h
index e190aef6f..e8b2d6a9c 100644
--- a/src/include/distributed/metadata_cache.h
+++ b/src/include/distributed/metadata_cache.h
@@ -150,6 +150,7 @@ extern char PgDistPartitionViaCatalog(Oid relationId);
 extern List * LookupDistShardTuples(Oid relationId);
 extern char PartitionMethodViaCatalog(Oid relationId);
 extern Var * PartitionColumnViaCatalog(Oid relationId);
+extern uint32 ColocationIdViaCatalog(Oid relationId);
 extern bool IsCitusLocalTableByDistParams(char partitionMethod, char replicationModel);
 extern List * CitusTableList(void);
 extern ShardInterval * LoadShardInterval(uint64 shardId);
diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out
index 2cfd6a7b7..d636857a2 100644
--- a/src/test/regress/expected/drop_partitioned_table.out
+++ b/src/test/regress/expected/drop_partitioned_table.out
@@ -353,6 +353,8 @@ NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.pa
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 NOTICE: issuing ROLLBACK
@@ -374,6 +376,8 @@ NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCAD
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
+NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(100047)
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 NOTICE: issuing ROLLBACK
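The two new NOTICE lines per transaction show the propagation half of the feature: when a colocation group loses its last table, the coordinator now issues citus_internal_delete_colocation_metadata to every metadata-syncing worker. A quick way to confirm the workers stayed in sync (a sketch; 100047 is just the group id from this test run):

SELECT nodeport, result FROM run_command_on_workers($$
    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = 100047
$$);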
diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out
index 3a88e0192..667aabaab 100644
--- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out
+++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out
@@ -119,9 +119,12 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 
 SELECT create_distributed_table('test_table', 'id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
+WARNING:  server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
+connection not open
+CONTEXT:  while executing command on localhost:xxxxx
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
---------------------------------------------------------------------
@@ -591,9 +594,7 @@ SELECT citus.mitmproxy('conn.kill()');
 (1 row)
 
 SELECT create_distributed_table('test_table', 'id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
-	This probably means the server terminated abnormally
-	before or while processing the request.
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
---------------------------------------------------------------------
@@ -658,9 +659,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 
 SELECT create_distributed_table('test_table', 'id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
-	This probably means the server terminated abnormally
-	before or while processing the request.
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out
index 4a575ed19..8a66f372c 100644
--- a/src/test/regress/expected/failure_create_table.out
+++ b/src/test/regress/expected/failure_create_table.out
@@ -90,9 +90,12 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 (1 row)
 
 SELECT create_distributed_table('test_table','id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
+WARNING:  server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
+connection not open
+CONTEXT:  while executing command on localhost:xxxxx
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
---------------------------------------------------------------------
@@ -353,9 +356,12 @@ SELECT citus.mitmproxy('conn.kill()');
 
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
+WARNING:  server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
+connection not open
+CONTEXT:  while executing command on localhost:xxxxx
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
@@ -389,9 +395,12 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
+WARNING:  server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
+connection not open
+CONTEXT:  while executing command on localhost:xxxxx
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
@@ -465,9 +474,12 @@ SELECT citus.mitmproxy('conn.kill()');
 
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
+WARNING:  server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
+connection not open
+CONTEXT:  while executing command on localhost:xxxxx
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
@@ -533,9 +545,12 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R
 
 BEGIN;
 SELECT create_distributed_table('test_table','id');
-ERROR:  connection to the remote node localhost:xxxxx failed with the following error: server closed the connection unexpectedly
+WARNING:  server closed the connection unexpectedly
 	This probably means the server terminated abnormally
 	before or while processing the request.
+connection not open
+CONTEXT:  while executing command on localhost:xxxxx
+ERROR:  failure on connection marked as essential: localhost:xxxxx
 ROLLBACK;
 SELECT citus.mitmproxy('conn.allow()');
  mitmproxy
diff --git a/src/test/regress/expected/function_propagation.out b/src/test/regress/expected/function_propagation.out
index 5c61761fb..e27b7c10b 100644
--- a/src/test/regress/expected/function_propagation.out
+++ b/src/test/regress/expected/function_propagation.out
@@ -1,5 +1,6 @@
 CREATE SCHEMA function_propagation_schema;
 SET search_path TO 'function_propagation_schema';
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
 -- Check whether supported dependencies can be distributed while propagating functions
 -- Check types
 SET citus.enable_metadata_sync TO OFF;
@@ -1115,7 +1116,7 @@ SELECT create_distributed_function('func_to_colocate(int)', colocate_with:='tbl_
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
  distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
-                             |        10003 |
+                             |        10001 |
 (1 row)
 
 -- convert to non-delegated
@@ -1143,7 +1144,7 @@ SELECT create_distributed_function('func_to_colocate(int)','$1','tbl_to_colocate
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
  distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
-                           0 |        10005 |
+                           0 |        10002 |
 (1 row)
 
 -- try create or replace the same func
@@ -1152,7 +1153,7 @@ CREATE OR REPLACE FUNCTION func_to_colocate (a int) returns int as $$select 1;$$
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
  distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
-                           0 |        10005 |
+                           0 |        10002 |
 (1 row)
 
 -- convert to non-delegated
@@ -1180,7 +1181,7 @@ SELECT create_distributed_function('func_to_colocate(int)','$1','tbl_to_colocate
 SELECT distribution_argument_index, colocationid, force_delegation FROM pg_catalog.pg_dist_object WHERE objid = 'func_to_colocate'::regproc;
  distribution_argument_index | colocationid | force_delegation
---------------------------------------------------------------------
-                           0 |        10005 | t
+                           0 |        10002 | t
 (1 row)
 
 -- convert to non-delegated
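The colocation id churn above (10003 becoming 10001, and so on) is a side effect of the main change: colocate_with => 'none' now inserts, and drops can delete, pg_dist_colocation rows, so ids observed later in a test schedule shift. That is why this patch pins the id sequence at the top of several regression tests; the pattern is simply:

-- pin the colocation id space so expected output stays deterministic
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;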
diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out
index ab76d8596..980cf3a82 100644
--- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out
+++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out
@@ -1235,7 +1235,7 @@ master_remove_node
 
 (2 rows)
 
-starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
        1
@@ -1306,6 +1306,16 @@ master_remove_node
 
 (1 row)
 
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
 step s1-add-worker:
     SELECT 1 FROM master_add_node('localhost', 57638);
 
@@ -1372,11 +1382,13 @@ pg_identify_object_as_address
---------------------------------------------------------------------
 (database,{regression},{})
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{myschema},{})
 (schema,{public},{})
+(table,"{col_schema,col_tbl}",{})
 (table,"{myschema,t1}",{})
 (table,"{myschema,t2}",{})
-(6 rows)
+(8 rows)
 
 count
---------------------------------------------------------------------
@@ -2139,7 +2151,7 @@ master_remove_node
 
 (2 rows)
 
-starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
        1
@@ -2210,6 +2222,16 @@ master_remove_node
 
 (1 row)
 
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
 step s1-begin:
     BEGIN;
 
@@ -2269,8 +2291,10 @@ pg_identify_object_as_address
 (database,{regression},{})
 (function,"{public,add}","{integer,integer}")
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{public},{})
-(4 rows)
+(table,"{col_schema,col_tbl}",{})
+(6 rows)
 
 count
---------------------------------------------------------------------
@@ -2322,7 +2346,7 @@ master_remove_node
 
 (2 rows)
 
-starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
        1
@@ -2393,6 +2417,16 @@ master_remove_node
 
 (1 row)
 
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
 step s1-begin:
     BEGIN;
 
@@ -2459,8 +2493,10 @@ pg_identify_object_as_address
 (database,{regression},{})
 (function,"{public,add}","{integer,integer}")
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{public},{})
-(4 rows)
+(table,"{col_schema,col_tbl}",{})
+(6 rows)
 
 count
---------------------------------------------------------------------
@@ -2512,7 +2548,7 @@ master_remove_node
 
 (2 rows)
 
-starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
+starting permutation: s1-print-distributed-objects s2-create-table-for-colocation s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas
 ?column?
---------------------------------------------------------------------
        1
@@ -2583,6 +2619,16 @@ master_remove_node
 
 (1 row)
 
+step s2-create-table-for-colocation:
+    CREATE SCHEMA col_schema;
+    CREATE TABLE col_schema.col_tbl (a INT, b INT);
+    SELECT create_distributed_table('col_schema.col_tbl', 'a');
+
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
 step s2-begin:
     BEGIN;
 
@@ -2650,9 +2696,11 @@ pg_identify_object_as_address
 (database,{regression},{})
 (function,"{myschema,add}","{integer,integer}")
 (role,{postgres},{})
+(schema,{col_schema},{})
 (schema,{myschema},{})
 (schema,{public},{})
-(5 rows)
+(table,"{col_schema,col_tbl}",{})
+(7 rows)
 
 count
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out
index 88f4d68cd..6a2e595a0 100644
--- a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out
+++ b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out
@@ -204,7 +204,7 @@ restore_isolation_tester_func
 
 (1 row)
 
-starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-drop s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
+starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-drop s1-commit-worker s1-stop-connection s3-select-count
 create_reference_table
---------------------------------------------------------------------
 
@@ -237,22 +237,6 @@ run_commands_on_session_level_connection_to_node
 
 (1 row)
 
-step s2-start-session-level-connection:
-    SELECT start_session_level_connection_to_node('localhost', 57638);
-
-start_session_level_connection_to_node
----------------------------------------------------------------------
-
-(1 row)
-
-step s2-begin-on-worker:
-    SELECT run_commands_on_session_level_connection_to_node('BEGIN');
-
-run_commands_on_session_level_connection_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 step s2-drop:
     DROP TABLE ref_table;
@@ -265,14 +249,6 @@ run_commands_on_session_level_connection_to_node
 
 (1 row)
 
 step s2-drop: <... completed>
-step s2-commit-worker:
-    SELECT run_commands_on_session_level_connection_to_node('COMMIT');
-
-run_commands_on_session_level_connection_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 step s1-stop-connection:
     SELECT stop_session_level_connection_to_node();
@@ -281,14 +257,6 @@ stop_session_level_connection_to_node
---------------------------------------------------------------------
 
 (1 row)
 
-step s2-stop-connection:
-    SELECT stop_session_level_connection_to_node();
-
-stop_session_level_connection_to_node
----------------------------------------------------------------------
-
-(1 row)
-
 step s3-select-count:
     SELECT COUNT(*) FROM ref_table;
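The new s2-create-table-for-colocation setup step gives every permutation a pre-existing colocation group, so the create_distributed_table calls inside the permutations reuse that group instead of writing new colocation metadata mid-test. The reuse itself is ordinary Citus behavior: a later create with the same shard count, replication factor, and distribution column type joins the existing group, as in this sketch (hypothetical second table; assumes unchanged citus.shard_count and replication settings):

CREATE TABLE col_schema.col_tbl2 (a INT, b INT);
SELECT create_distributed_table('col_schema.col_tbl2', 'a');
-- both tables land in the same group
SELECT count(DISTINCT colocationid) FROM pg_dist_partition
WHERE logicalrelid IN ('col_schema.col_tbl'::regclass,
                       'col_schema.col_tbl2'::regclass);  -- expect 1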
diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out
index 1fa58264c..319dc69cf 100644
--- a/src/test/regress/expected/multi_colocation_utils.out
+++ b/src/test/regress/expected/multi_colocation_utils.out
@@ -451,7 +451,8 @@ SELECT * FROM pg_dist_colocation
  5 | 2 | 1 | 23 | 0
  6 | 2 | 2 | 25 | 100
  7 | 8 | 2 | 23 | 0
-(4 rows)
+ 22 | 1 | -1 | 0 | 0
+(5 rows)
 
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
@@ -464,11 +465,13 @@ $$);
  57637 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57637 | {"shardcount": 2, "colocationid": 6, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
  57637 | {"shardcount": 8, "colocationid": 7, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+ 57637 | {"shardcount": 1, "colocationid": 22, "replicationfactor": -1, "distributioncolumntype": "0", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 6, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
  57638 | {"shardcount": 8, "colocationid": 7, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
-(8 rows)
+ 57638 | {"shardcount": 1, "colocationid": 22, "replicationfactor": -1, "distributioncolumntype": "0", "distributioncolumncollation": "0"}
+(10 rows)
 
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
@@ -508,18 +511,15 @@ DROP TABLE table2_groupA;
 SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
  colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
---------------------------------------------------------------------
- 4 | 2 | 2 | 23 | 0
-(1 row)
+(0 rows)
 
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
-SELECT array_agg(row_to_json(c)) FROM pg_dist_colocation c WHERE colocationid = 4
+SELECT coalesce(array_agg(row_to_json(c)), '{}') FROM pg_dist_colocation c WHERE colocationid = 4
 $$);
- nodeport | unnest
+ nodeport | unnest
---------------------------------------------------------------------
- 57637 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
- 57638 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
-(2 rows)
+(0 rows)
 
 -- create dropped colocation group again
 SET citus.shard_count = 2;
@@ -606,33 +606,36 @@ SELECT * FROM pg_dist_colocation
 ORDER BY colocationid;
  colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
---------------------------------------------------------------------
- 4 | 2 | 2 | 23 | 0
  5 | 2 | 1 | 23 | 0
  6 | 2 | 2 | 25 | 100
  7 | 8 | 2 | 23 | 0
+ 8 | 2 | 2 | 23 | 0
+ 9 | 2 | 2 | 23 | 0
+ 10 | 2 | 2 | 23 | 0
  11 | 3 | 2 | 23 | 0
-(5 rows)
+ 22 | 1 | -1 | 0 | 0
+(8 rows)
 
 SELECT logicalrelid, colocationid FROM pg_dist_partition
 WHERE colocationid >= 1 AND colocationid < 1000
 ORDER BY colocationid, logicalrelid;
  logicalrelid | colocationid
---------------------------------------------------------------------
- table1_groupe | 4
- table2_groupe | 4
- table3_groupe | 4
- schema_colocation.table4_groupe | 4
- table4_groupe | 4
 table1_groupb | 5
 table2_groupb | 5
 table1_groupc | 6
 table2_groupc | 6
 table1_groupd | 7
 table2_groupd | 7
- table1_group_none_1 | 8
- table2_group_none_1 | 8
- table1_group_none_2 | 9
- table1_group_none_3 | 10
+ table1_groupe | 8
+ table2_groupe | 8
+ table3_groupe | 8
+ schema_colocation.table4_groupe | 8
+ table4_groupe | 8
+ table1_group_none_1 | 9
+ table2_group_none_1 | 9
+ table1_group_none_2 | 10
+ table1_group_none_3 | 11
 table1_group_default | 11
 (16 rows)
 
@@ -699,12 +702,15 @@ SELECT * FROM pg_dist_colocation
 ORDER BY colocationid;
  colocationid | shardcount | replicationfactor | distributioncolumntype | distributioncolumncollation
---------------------------------------------------------------------
- 4 | 2 | 2 | 23 | 0
  5 | 2 | 1 | 23 | 0
  6 | 2 | 2 | 25 | 100
  7 | 8 | 2 | 23 | 0
+ 8 | 2 | 2 | 23 | 0
+ 9 | 2 | 2 | 23 | 0
+ 10 | 2 | 2 | 23 | 0
  11 | 3 | 2 | 23 | 0
-(5 rows)
+ 22 | 1 | -1 | 0 | 0
+(8 rows)
 
 -- cross check with internal colocation API
 SELECT
@@ -739,8 +745,9 @@ ORDER BY
 table3_groupe | table4_groupe | t
 schema_colocation.table4_groupe | table4_groupe | t
 table1_group_none_1 | table2_group_none_1 | t
+ table1_group_none_3 | table1_group_default | t
 table1_groupf | table2_groupf | t
-(16 rows)
+(17 rows)
 
 -- check created shards
 SELECT
@@ -979,7 +986,10 @@ SELECT * FROM pg_dist_colocation
  3 | 8 | 2 | 23 | 0
  4 | 2 | 2 | 23 | 0
  5 | 2 | 2 | 23 | 0
-(5 rows)
+ 6 | 2 | 2 | 23 | 0
+ 7 | 1 | -1 | 0 | 0
+ 8 | 2 | 2 | 23 | 0
+(8 rows)
 
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
@@ -993,12 +1003,18 @@ $$);
  57637 | {"shardcount": 2, "colocationid": 1, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57637 | {"shardcount": 2, "colocationid": 2, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
  57637 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57637 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57637 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+ 57637 | {"shardcount": 2, "colocationid": 6, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+ 57637 | {"shardcount": 1, "colocationid": 7, "replicationfactor": -1, "distributioncolumntype": "0", "distributioncolumncollation": "0"}
+ 57637 | {"shardcount": 2, "colocationid": 8, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 1, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 2, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
  57638 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
-(10 rows)
+ 57638 | {"shardcount": 2, "colocationid": 6, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+ 57638 | {"shardcount": 1, "colocationid": 7, "replicationfactor": -1, "distributioncolumntype": "0", "distributioncolumncollation": "0"}
+ 57638 | {"shardcount": 2, "colocationid": 8, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+(16 rows)
 
@@ -1015,7 +1031,7 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition
 table1_groupe | 5
 table3_groupe | 5
 table1_group_none | 6
- table2_group_none | 7
+ table2_group_none | 8
 (11 rows)
 
 -- move the all tables in colocation group 5 to colocation group 7
@@ -1073,7 +1089,9 @@ SELECT * FROM pg_dist_colocation
  3 | 8 | 2 | 23 | 0
  4 | 2 | 2 | 23 | 0
  5 | 2 | 2 | 23 | 0
-(5 rows)
+ 7 | 1 | -1 | 0 | 0
+ 8 | 2 | 2 | 23 | 0
+(7 rows)
 
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
@@ -1087,12 +1105,16 @@ $$);
  57637 | {"shardcount": 2, "colocationid": 1, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57637 | {"shardcount": 2, "colocationid": 2, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
  57637 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57637 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57637 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+ 57637 | {"shardcount": 1, "colocationid": 7, "replicationfactor": -1, "distributioncolumntype": "0", "distributioncolumncollation": "0"}
+ 57637 | {"shardcount": 2, "colocationid": 8, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 1, "replicationfactor": 1, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 2, "replicationfactor": 2, "distributioncolumntype": "25", "distributioncolumncollation": "100"}
  57638 | {"shardcount": 8, "colocationid": 3, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 4, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
  57638 | {"shardcount": 2, "colocationid": 5, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
-(10 rows)
+ 57638 | {"shardcount": 1, "colocationid": 7, "replicationfactor": -1, "distributioncolumntype": "0", "distributioncolumncollation": "0"}
+ 57638 | {"shardcount": 2, "colocationid": 8, "replicationfactor": 2, "distributioncolumntype": "23", "distributioncolumncollation": "0"}
+(14 rows)
 
@@ -1108,8 +1130,8 @@ SELECT logicalrelid, colocationid FROM pg_dist_partition
 table2_groupe | 4
 table1_groupe | 5
 table3_groupe | 5
- table1_group_none | 7
- table2_group_none | 7
+ table1_group_none | 8
+ table2_group_none | 8
 (11 rows)
 
 -- try to colocate different replication models
@@ -1443,3 +1465,84 @@ DROP TABLE range_table;
 DROP TABLE none;
 DROP TABLE ref;
 DROP TABLE local_table;
+CREATE TABLE tbl_1 (a INT, b INT);
+CREATE TABLE tbl_2 (a INT, b INT);
+CREATE TABLE tbl_3 (a INT, b INT);
+SELECT create_distributed_table('tbl_1', 'a', shard_count:=4);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('tbl_2', 'a', shard_count:=4);
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT create_distributed_table('tbl_3', 'a', shard_count:=4, colocate_with:='NONE');
+ create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT colocation_id AS col_id_1 FROM citus_tables WHERE table_name::text = 'tbl_1' \gset
+SELECT colocation_id AS col_id_2 FROM citus_tables WHERE table_name::text = 'tbl_2' \gset
+SELECT colocation_id AS col_id_3 FROM citus_tables WHERE table_name::text = 'tbl_3' \gset
+-- check that tables are colocated correctly
+SELECT :col_id_1 = :col_id_2;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT :col_id_1 = :col_id_3;
+ ?column?
+---------------------------------------------------------------------
+ f
+(1 row)
+
+-- check that there are separate rows for both colocation groups in pg_dist_colocation
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+ result
+---------------------------------------------------------------------
+ 1
+ 1
+ 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+ result
+---------------------------------------------------------------------
+ 1
+ 1
+ 1
+(3 rows)
+
+DROP TABLE tbl_1, tbl_3;
+-- check that empty colocation group is dropped and non-empty is not
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+ result
+---------------------------------------------------------------------
+ 1
+ 1
+ 1
+(3 rows)
+
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+ result
+---------------------------------------------------------------------
+ 0
+ 0
+ 0
+(3 rows)
+
+DROP TABLE tbl_2;
diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out
index 91a65dc02..890497202 100644
--- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out
+++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out
@@ -8,6 +8,7 @@ SET citus.next_shard_id TO 910000;
 SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA fix_idx_names;
 SET search_path TO fix_idx_names, public;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000;
 -- stop metadata sync for one of the worker nodes so we test both cases
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 NOTICE: dropping metadata on the node (localhost,57637)
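With DeleteColocationGroupIfNoTablesBelong in place, the invariant the new test above exercises can be audited on any cluster: no pg_dist_colocation row should survive without at least one member table. A hypothetical audit query (treat hits as candidates rather than proof of a bug, since distributed functions can also be pinned to a group through pg_dist_object):

SELECT c.colocationid
FROM pg_dist_colocation c
WHERE NOT EXISTS (
    SELECT 1 FROM pg_dist_partition p
    WHERE p.colocationid = c.colocationid
);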
diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out
index 749254292..859c3ff58 100644
--- a/src/test/regress/expected/multi_metadata_sync.out
+++ b/src/test/regress/expected/multi_metadata_sync.out
@@ -19,6 +19,7 @@ NOTICE: dropping metadata on the node (localhost,57638)
 (1 row)
 
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 2;
 SET citus.replicate_reference_tables_on_activate TO off;
 SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
 \gset
@@ -1895,12 +1896,12 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 4, 's')
- SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 4, 's')
+ SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 5, 's')
+ SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 5, 's')
 SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10005, 's')
- SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10003, 't')
- SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10005, 's')
+ SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
+ SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
+ SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
 SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint')
@@ -1924,7 +1925,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 SET citus.enable_ddl_propagation TO 'on'
 SET citus.enable_ddl_propagation TO 'on'
 UPDATE pg_dist_local_group SET groupid = 1
- WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema)  AS (VALUES (10002, 7, 1, 'integer'::regtype, NULL, NULL), (10003, 1, -1, 0, NULL, NULL), (10004, 3, 1, 'integer'::regtype, NULL, NULL), (10005, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
+ WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema)  AS (VALUES (10009, 1, -1, 0, NULL, NULL), (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)  AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid)  AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 5, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 5, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 5, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
 WITH placement_data(shardid, shardstate, shardlength, groupid, placementid)  AS (VALUES (1310020, 1, 0, 1, 100020), (1310021, 1, 0, 5, 100021), (1310022, 1, 0, 1, 100022), (1310023, 1, 0, 5, 100023), (1310024, 1, 0, 1, 100024)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
diff --git a/src/test/regress/expected/multi_mx_alter_distributed_table.out b/src/test/regress/expected/multi_mx_alter_distributed_table.out
index 427dfe027..9557cac3a 100644
--- a/src/test/regress/expected/multi_mx_alter_distributed_table.out
+++ b/src/test/regress/expected/multi_mx_alter_distributed_table.out
@@ -269,13 +269,13 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
  logicalrelid | colocationid
---------------------------------------------------------------------
- test_proc_colocation_0 | 1410004
+ test_proc_colocation_0 | 1410005
 (1 row)
 
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0');
  proname | colocationid
---------------------------------------------------------------------
- proc_0 | 1410004
+ proc_0 | 1410005
 (1 row)
 
 -- shardCount is not null && cascade_to_colocated is true
@@ -302,13 +302,13 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
  logicalrelid | colocationid
---------------------------------------------------------------------
- test_proc_colocation_0 | 1410003
+ test_proc_colocation_0 | 1410006
 (1 row)
 
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0');
  proname | colocationid
---------------------------------------------------------------------
- proc_0 | 1410003
+ proc_0 | 1410006
 (1 row)
 
 -- colocatewith is not null && cascade_to_colocated is true
@@ -356,13 +356,13 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
  logicalrelid | colocationid
---------------------------------------------------------------------
- test_proc_colocation_0 | 1410005
+ test_proc_colocation_0 | 1410008
 (1 row)
 
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0');
  proname | colocationid
---------------------------------------------------------------------
- proc_0 | 1410005
+ proc_0 | 1410008
 (1 row)
 
 -- try a case with more than one procedure
@@ -386,14 +386,14 @@ SELECT create_distributed_function('proc_1(float8)', 'dist_key', 'test_proc_colo
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
  logicalrelid | colocationid
---------------------------------------------------------------------
- test_proc_colocation_0 | 1410005
+ test_proc_colocation_0 | 1410008
 (1 row)
 
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0', 'proc_1') ORDER BY proname;
  proname | colocationid
---------------------------------------------------------------------
- proc_0 | 1410005
- proc_1 | 1410005
+ proc_0 | 1410008
+ proc_1 | 1410008
 (2 rows)
 
 SET client_min_messages TO DEBUG1;
@@ -437,14 +437,14 @@ RESET client_min_messages;
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
  logicalrelid | colocationid
---------------------------------------------------------------------
- test_proc_colocation_0 | 1410003
+ test_proc_colocation_0 | 1410009
 (1 row)
 
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0', 'proc_1') ORDER BY proname;
  proname | colocationid
---------------------------------------------------------------------
- proc_0 | 1410003
- proc_1 | 1410003
+ proc_0 | 1410009
+ proc_1 | 1410009
 (2 rows)
 
 -- case which shouldn't preserve colocation for now
@@ -462,14 +462,14 @@ NOTICE: renaming the new table to mx_alter_distributed_table.test_proc_colocati
 SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid::regclass::text IN ('test_proc_colocation_0');
  logicalrelid | colocationid
---------------------------------------------------------------------
- test_proc_colocation_0 | 1410006
+ test_proc_colocation_0 | 1410010
 (1 row)
 
 SELECT proname, colocationid FROM pg_proc JOIN pg_catalog.pg_dist_object ON pg_proc.oid = pg_catalog.pg_dist_object.objid WHERE proname IN ('proc_0', 'proc_1') ORDER BY proname;
  proname | colocationid
---------------------------------------------------------------------
- proc_0 | 1410003
- proc_1 | 1410003
+ proc_0 | 1410009
+ proc_1 | 1410009
 (2 rows)
 
 SET client_min_messages TO WARNING;
diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out
index 6036bd325..c443bfb3d 100644
--- a/src/test/regress/expected/multi_mx_create_table.out
+++ b/src/test/regress/expected/multi_mx_create_table.out
@@ -2,6 +2,7 @@
 -- MULTI_MX_CREATE_TABLE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000;
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
  start_metadata_sync_to_node
---------------------------------------------------------------------
@@ -406,29 +407,29 @@ FROM pg_dist_partition NATURAL JOIN shard_counts
 ORDER BY colocationid, logicalrelid;
  logicalrelid | colocationid | shard_count | partmethod | repmodel
---------------------------------------------------------------------
- customer_mx | 1390001 | 1 | n | t
- nation_mx | 1390001 | 1 | n | t
- part_mx | 1390001 | 1 | n | t
- supplier_mx | 1390001 | 1 | n | t
- citus_mx_test_schema_join_1.nation_hash | 1390003 | 4 | h | s
- citus_mx_test_schema_join_1.nation_hash_2 | 1390003 | 4 | h | s
- citus_mx_test_schema_join_2.nation_hash | 1390003 | 4 | h | s
- citus_mx_test_schema.nation_hash_collation_search_path | 1390003 | 4 | h | s
- citus_mx_test_schema.nation_hash_composite_types | 1390003 | 4 | h | s
- mx_ddl_table | 1390003 | 4 | h | s
- app_analytics_events_mx | 1390003 | 4 | h | s
- company_employees_mx | 1390003 | 4 | h | s
- nation_hash | 1390006 | 16 | h | s
- citus_mx_test_schema.nation_hash | 1390006 | 16 | h | s
- lineitem_mx | 1390007 | 16 | h | s
- orders_mx | 1390007 | 16 | h | s
- limit_orders_mx | 1390008 | 2 | h | s
- articles_hash_mx | 1390008 | 2 | h | s
- multiple_hash_mx | 1390009 | 2 | h | s
- researchers_mx | 1390010 | 2 | h | s
- labs_mx | 1390011 | 1 | h | s
- objects_mx | 1390011 | 1 | h | s
- articles_single_shard_hash_mx | 1390011 | 1 | h | s
+ nation_hash | 1220000 | 16 | h | s
+ citus_mx_test_schema.nation_hash | 1220000 | 16 | h | s
+ customer_mx | 1220001 | 1 | n | t
+ nation_mx | 1220001 | 1 | n | t
+ part_mx | 1220001 | 1 | n | t
+ supplier_mx | 1220001 | 1 | n | t
+ citus_mx_test_schema_join_1.nation_hash | 1220002 | 4 | h | s
+ citus_mx_test_schema_join_1.nation_hash_2 | 1220002 | 4 | h | s
+ citus_mx_test_schema_join_2.nation_hash | 1220002 | 4 | h | s
+ citus_mx_test_schema.nation_hash_collation_search_path | 1220002 | 4 | h | s
+ citus_mx_test_schema.nation_hash_composite_types | 1220002 | 4 | h | s
+ mx_ddl_table | 1220002 | 4 | h | s
+ app_analytics_events_mx | 1220002 | 4 | h | s
+ company_employees_mx | 1220002 | 4 | h | s
+ lineitem_mx | 1220003 | 16 | h | s
+ orders_mx | 1220003 | 16 | h | s
+ limit_orders_mx | 1220004 | 2 | h | s
+ articles_hash_mx | 1220004 | 2 | h | s
+ multiple_hash_mx | 1220005 | 2 | h | s
+ researchers_mx | 1220006 | 2 | h | s
+ labs_mx | 1220007 | 1 | h | s
+ objects_mx | 1220007 | 1 | h | s
+ articles_single_shard_hash_mx | 1220007 | 1 | h | s
 (23 rows)
 
 -- check the citus_tables view
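The renumbering in multi_mx_alter_distributed_table.out follows from alter_distributed_table materializing the table into a fresh colocation group whenever its shape changes; the test's real assertions are the relationships, i.e. that the procedures keep following their colocated table. The observable effect, sketched (assumes psql; the shard_count value is arbitrary):

SELECT colocationid FROM pg_dist_partition
WHERE logicalrelid = 'test_proc_colocation_0'::regclass \gset before_
SELECT alter_distributed_table('test_proc_colocation_0', shard_count := 8, cascade_to_colocated := false);
SELECT colocationid <> :before_colocationid AS moved FROM pg_dist_partition
WHERE logicalrelid = 'test_proc_colocation_0'::regclass;  -- expect t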
diff --git a/src/test/regress/expected/start_stop_metadata_sync.out b/src/test/regress/expected/start_stop_metadata_sync.out
index d3f961124..152375323 100644
--- a/src/test/regress/expected/start_stop_metadata_sync.out
+++ b/src/test/regress/expected/start_stop_metadata_sync.out
@@ -1,6 +1,7 @@
 CREATE SCHEMA start_stop_metadata_sync;
 SET search_path TO "start_stop_metadata_sync";
 SET citus.next_shard_id TO 980000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 980000;
 SET client_min_messages TO WARNING;
 SET citus.shard_count TO 4;
 SET citus.shard_replication_factor TO 1;
@@ -157,12 +158,12 @@ SELECT * FROM test_matview;
 SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text;
  logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
- events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
- events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
- events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f
- events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
- events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
- events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390013 | c | f
+ events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 980000 | s | f
+ events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 980000 | s | f
+ events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 980000 | s | f
+ events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 980001 | c | f
+ events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 980001 | c | f
+ events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 980001 | c | f
 (6 rows)
 
 SELECT count(*) > 0 FROM pg_dist_node;
create_distributed_table('col_schema.col_tbl', 'a'); +} + // prints from session 2 are run at the end when the worker has already been added by the // test step "s2-print-distributed-objects" @@ -199,7 +207,7 @@ permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-sche // concurrency tests with multi schema distribution permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" -permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s1-commit" "s2-create-table" "s2-commit" "s3-create-table" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // type and schema tests @@ -212,10 +220,10 @@ permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-sche // s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by // "s2-commit", because "COMMIT" syncs the messages -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // we cannot run the following operations concurrently // the problem is that NOTIFY event doesn't (reliably) happen before COMMIT // so we have to commit s2 before s1 starts -permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s2-create-table-for-colocation" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" diff --git a/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec 
diff --git a/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec b/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec
index 6fc02d78e..27e532f5e 100644
--- a/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec
+++ b/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec
@@ -110,7 +110,7 @@ step "s3-select-count"
 
 permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
 permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert-select-ref-table" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
-permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-drop" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
+permutation "s1-add-primary-key" "s1-start-session-level-connection" "s1-begin-on-worker" "s1-upsert" "s2-drop" "s1-commit-worker" "s1-stop-connection" "s3-select-count"
 permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-delete" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count"
 //Not able to test the next permutation, until issue with CREATE INDEX CONCURRENTLY's locks is resolved. Issue #2966
 //permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s3-select-count" "s1-stop-connection"
diff --git a/src/test/regress/sql/function_propagation.sql b/src/test/regress/sql/function_propagation.sql
index 579a1aa9f..eca10beb5 100644
--- a/src/test/regress/sql/function_propagation.sql
+++ b/src/test/regress/sql/function_propagation.sql
@@ -1,5 +1,6 @@
 CREATE SCHEMA function_propagation_schema;
 SET search_path TO 'function_propagation_schema';
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
 
 -- Check whether supported dependencies can be distributed while propagating functions
 
diff --git a/src/test/regress/sql/multi_colocation_utils.sql b/src/test/regress/sql/multi_colocation_utils.sql
index 245c2ce6d..57b7c366e 100644
--- a/src/test/regress/sql/multi_colocation_utils.sql
+++ b/src/test/regress/sql/multi_colocation_utils.sql
@@ -236,7 +236,7 @@ SELECT * FROM pg_dist_colocation WHERE colocationid = 4;
 
 -- check to see whether metadata is synced
 SELECT nodeport, unnest(result::jsonb[]) FROM run_command_on_workers($$
-SELECT array_agg(row_to_json(c)) FROM pg_dist_colocation c WHERE colocationid = 4
+SELECT coalesce(array_agg(row_to_json(c)), '{}') FROM pg_dist_colocation c WHERE colocationid = 4
 $$);
 
 -- create dropped colocation group again
@@ -591,3 +591,40 @@ DROP TABLE range_table;
 DROP TABLE none;
 DROP TABLE ref;
 DROP TABLE local_table;
+
+
+CREATE TABLE tbl_1 (a INT, b INT);
+CREATE TABLE tbl_2 (a INT, b INT);
+CREATE TABLE tbl_3 (a INT, b INT);
+
+SELECT create_distributed_table('tbl_1', 'a', shard_count:=4);
+SELECT create_distributed_table('tbl_2', 'a', shard_count:=4);
+SELECT create_distributed_table('tbl_3', 'a', shard_count:=4, colocate_with:='NONE');
+
+SELECT colocation_id AS col_id_1 FROM citus_tables WHERE table_name::text = 'tbl_1' \gset
+SELECT colocation_id AS col_id_2 FROM citus_tables WHERE table_name::text = 'tbl_2' \gset
+SELECT colocation_id AS col_id_3 FROM citus_tables WHERE table_name::text = 'tbl_3' \gset
+
+-- check that tables are colocated correctly
+SELECT :col_id_1 = :col_id_2;
+SELECT :col_id_1 = :col_id_3;
+
+-- check that there are separate rows for both colocation groups in pg_dist_colocation
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+
+DROP TABLE tbl_1, tbl_3;
+
+-- check that empty colocation group is dropped and non-empty is not
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_1
+);
+SELECT result FROM run_command_on_all_nodes('
+    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = ' || :col_id_3
+);
+
+DROP TABLE tbl_2;
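The coalesce added in the multi_colocation_utils hunk matters because this patch deletes colocation groups that lose their last table, so the inner query can legitimately match zero rows on a worker. array_agg over zero rows returns NULL rather than an empty array, and once run_command_on_workers hands that back as text, the outer result::jsonb[] cast presumably fails on it; coalescing to '{}' keeps the result a valid array literal. The distinction in isolation:

    -- array_agg over zero rows is NULL, not an empty array
    SELECT array_agg(x) IS NULL FROM generate_series(1, 0) AS x;          -- t
    SELECT coalesce(array_agg(x), '{}') FROM generate_series(1, 0) AS x;  -- {}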
diff --git a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
index 3ee453074..b3996152f 100644
--- a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
+++ b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql
@@ -9,6 +9,8 @@ SET citus.shard_replication_factor TO 1;
 CREATE SCHEMA fix_idx_names;
 SET search_path TO fix_idx_names, public;
 
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000;
+
 -- stop metadata sync for one of the worker nodes so we test both cases
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 
diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql
index 0d67bb68b..d70c69653 100644
--- a/src/test/regress/sql/multi_metadata_sync.sql
+++ b/src/test/regress/sql/multi_metadata_sync.sql
@@ -10,6 +10,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
 
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 2;
 SET citus.replicate_reference_tables_on_activate TO off;
 
 SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id
diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql
index 0a685e5ce..52270409e 100644
--- a/src/test/regress/sql/multi_mx_create_table.sql
+++ b/src/test/regress/sql/multi_mx_create_table.sql
@@ -3,6 +3,7 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000;
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
 
diff --git a/src/test/regress/sql/start_stop_metadata_sync.sql b/src/test/regress/sql/start_stop_metadata_sync.sql
index 4e30cef1c..c1fed6243 100644
--- a/src/test/regress/sql/start_stop_metadata_sync.sql
+++ b/src/test/regress/sql/start_stop_metadata_sync.sql
@@ -1,6 +1,7 @@
 CREATE SCHEMA start_stop_metadata_sync;
 SET search_path TO "start_stop_metadata_sync";
 SET citus.next_shard_id TO 980000;
+ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 980000;
 SET client_min_messages TO WARNING;
 SET citus.shard_count TO 4;
 SET citus.shard_replication_factor TO 1;
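The pg_dist_colocationid_seq restarts added across these .sql files all serve the same purpose: now that colocate_with := 'none' records a real colocation group and empty groups are deleted on drop, the ids later tests observe would otherwise drift, so each file pins the sequence to keep its expected output deterministic. A hedged end-to-end sketch of the lifecycle the new multi_colocation_utils block exercises, with an illustrative table name:

    CREATE TABLE lone_tbl (a INT);
    SELECT create_distributed_table('lone_tbl', 'a', colocate_with := 'none');
    SELECT colocationid AS lone_col_id FROM pg_dist_partition
    WHERE logicalrelid = 'lone_tbl'::regclass \gset
    -- a 'none' table now owns a real row in pg_dist_colocation
    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = :lone_col_id;  -- 1
    DROP TABLE lone_tbl;
    -- dropping the group's last table removes the now-empty row as well
    SELECT count(*) FROM pg_dist_colocation WHERE colocationid = :lone_col_id;  -- 0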