diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c
index 97278d3fd..7e2d3c2e2 100644
--- a/src/backend/distributed/metadata/metadata_sync.c
+++ b/src/backend/distributed/metadata/metadata_sync.c
@@ -158,6 +158,7 @@ MetadataCreateCommands(void)
 {
 	List *metadataSnapshotCommandList = NIL;
 	List *distributedTableList = DistributedTableList();
+	List *mxTableList = NIL;
 	List *workerNodeList = WorkerNodeList();
 	ListCell *distributedTableCell = NULL;
 	char *nodeListInsertCommand = NULL;
@@ -167,26 +168,67 @@ MetadataCreateCommands(void)
 	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
 										  nodeListInsertCommand);
 
-	/* iterate over the distributed tables */
+	/* create the list of mx tables */
 	foreach(distributedTableCell, distributedTableList)
 	{
 		DistTableCacheEntry *cacheEntry =
 			(DistTableCacheEntry *) lfirst(distributedTableCell);
-		List *clusteredTableDDLEvents = NIL;
+		if (ShouldSyncTableMetadata(cacheEntry->relationId))
+		{
+			mxTableList = lappend(mxTableList, cacheEntry);
+		}
+	}
+
+	/* create the mx tables, but not the metadata */
+	foreach(distributedTableCell, mxTableList)
+	{
+		DistTableCacheEntry *cacheEntry =
+			(DistTableCacheEntry *) lfirst(distributedTableCell);
+		Oid relationId = cacheEntry->relationId;
+
+		List *commandList = GetTableDDLEvents(relationId);
+		char *tableOwnerResetCommand = TableOwnerResetCommand(relationId);
+
+		metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
+												  commandList);
+		metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+											  tableOwnerResetCommand);
+	}
+
+	/* construct the foreign key constraints after all tables are created */
+	foreach(distributedTableCell, mxTableList)
+	{
+		DistTableCacheEntry *cacheEntry =
+			(DistTableCacheEntry *) lfirst(distributedTableCell);
+
+		List *foreignConstraintCommands =
+			GetTableForeignConstraintCommands(cacheEntry->relationId);
+
+		metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
+												  foreignConstraintCommands);
+	}
+
+	/* after all tables are created, create the metadata */
+	foreach(distributedTableCell, mxTableList)
+	{
+		DistTableCacheEntry *cacheEntry =
+			(DistTableCacheEntry *) lfirst(distributedTableCell);
 		List *shardIntervalList = NIL;
 		List *shardCreateCommandList = NIL;
+		char *metadataCommand = NULL;
+		char *truncateTriggerCreateCommand = NULL;
 		Oid clusteredTableId = cacheEntry->relationId;
 
-		/* add only clustered tables */
-		if (!ShouldSyncTableMetadata(clusteredTableId))
-		{
-			continue;
-		}
+		/* add the table metadata command first */
+		metadataCommand = DistributionCreateCommand(cacheEntry);
+		metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+											  metadataCommand);
 
-		/* add the DDL events first */
-		clusteredTableDDLEvents = GetDistributedTableDDLEvents(cacheEntry);
-		metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
-												  clusteredTableDDLEvents);
+		/* add the truncate trigger command after the table becomes distributed */
+		truncateTriggerCreateCommand =
+			TruncateTriggerCreateCommand(cacheEntry->relationId);
+		metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+											  truncateTriggerCreateCommand);
 
 		/* add the pg_dist_shard{,placement} entries */
 		shardIntervalList = LoadShardIntervalList(clusteredTableId);
@@ -476,34 +518,6 @@ NodeDeleteCommand(uint32 nodeId)
 }
 
 
-/*
- * GetDistributedTableDDLEvents returns the full set of DDL commands necessary to
- * create this relation on a worker. This includes setting up any sequences,
- * setting the owner of the table, and inserting into metadata tables.
- */
-List *
-GetDistributedTableDDLEvents(DistTableCacheEntry *cacheEntry)
-{
-	char *ownerResetCommand = NULL;
-	char *metadataCommand = NULL;
-	char *truncateTriggerCreateCommand = NULL;
-	Oid relationId = cacheEntry->relationId;
-
-	List *commandList = GetTableDDLEvents(relationId);
-
-	ownerResetCommand = TableOwnerResetCommand(relationId);
-	commandList = lappend(commandList, ownerResetCommand);
-
-	metadataCommand = DistributionCreateCommand(cacheEntry);
-	commandList = lappend(commandList, metadataCommand);
-
-	truncateTriggerCreateCommand = TruncateTriggerCreateCommand(relationId);
-	commandList = lappend(commandList, truncateTriggerCreateCommand);
-
-	return commandList;
-}
-
-
 /*
  * LocalGroupIdUpdateCommand creates the SQL command required to set the local group id
  * of a worker and returns the command in a string.
diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c
index 6876d090f..077f78dc9 100644
--- a/src/backend/distributed/worker/worker_drop_protocol.c
+++ b/src/backend/distributed/worker/worker_drop_protocol.c
@@ -42,6 +42,8 @@ static void DeletePartitionRow(Oid distributedRelationId);
  * not dropped as in the case of "DROP TABLE distributed_table;" command.
  *
  * The function errors out if the input relation Oid is not a regular or foreign table.
+ * The function is meant to be called only by the coordinator, and therefore requires
+ * superuser privileges.
  */
 Datum
 worker_drop_distributed_table(PG_FUNCTION_ARGS)
@@ -55,6 +57,8 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS)
 	ListCell *shardCell = NULL;
 	char relationKind = '\0';
 
+	EnsureSuperUser();
+
 	/* first check the relation type */
 	distributedRelation = relation_open(relationId, AccessShareLock);
 	relationKind = distributedRelation->rd_rel->relkind;
@@ -96,8 +100,8 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS)
 	}
 	else
 	{
-		/* drop the table only */
-		performDeletion(&distributedTableObject, DROP_RESTRICT,
+		/* drop the table with cascade since other tables may reference it */
+		performDeletion(&distributedTableObject, DROP_CASCADE,
 						PERFORM_DELETION_INTERNAL);
 	}
 
diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h
index c0c2a5edb..94efd573a 100644
--- a/src/include/distributed/metadata_sync.h
+++ b/src/include/distributed/metadata_sync.h
@@ -28,7 +28,6 @@ extern char * TableOwnerResetCommand(Oid distributedRelationId);
 extern char * NodeListInsertCommand(List *workerNodeList);
 extern List * ShardListInsertCommand(List *shardIntervalList);
 extern char * NodeDeleteCommand(uint32 nodeId);
-extern List * GetDistributedTableDDLEvents(DistTableCacheEntry *cacheEntry);
 
 
 #define DELETE_ALL_NODES "TRUNCATE pg_dist_node"
diff --git a/src/test/regress/expected/multi_metadata_snapshot.out b/src/test/regress/expected/multi_metadata_snapshot.out
index 3e6954900..b8caa16c7 100644
--- a/src/test/regress/expected/multi_metadata_snapshot.out
+++ b/src/test/regress/expected/multi_metadata_snapshot.out
@@ -249,6 +249,50 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table':
      1
 (1 row)
 
+-- Make sure that start_metadata_sync_to_node considers foreign key constraints
+SET citus.shard_replication_factor TO 1;
+CREATE SCHEMA mx_testing_schema_2;
+CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3));
+CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
+    FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3));
+SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+UPDATE
+    pg_dist_partition SET repmodel='s'
+WHERE
+    logicalrelid='mx_testing_schema.fk_test_1'::regclass
+    OR logicalrelid='mx_testing_schema_2.fk_test_2'::regclass;
+
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+ start_metadata_sync_to_node
+-----------------------------
+
+(1 row)
+
+-- Check that foreign key metadata exists on the worker
+\c - - - :worker_1_port
+\d mx_testing_schema_2.fk_test_2
+Table "mx_testing_schema_2.fk_test_2"
+ Column |  Type   | Modifiers
+--------+---------+-----------
+ col1   | integer |
+ col2   | integer |
+ col3   | text    |
+Foreign-key constraints:
+    "fk_test_2_col1_fkey" FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
+
+\c - - - :master_port
+RESET citus.shard_replication_factor;
 -- Check that repeated calls to start_metadata_sync_to_node has no side effects
 \c - - - :master_port
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
diff --git a/src/test/regress/sql/multi_metadata_snapshot.sql b/src/test/regress/sql/multi_metadata_snapshot.sql
index d760c039c..91c091b18 100644
--- a/src/test/regress/sql/multi_metadata_snapshot.sql
+++ b/src/test/regress/sql/multi_metadata_snapshot.sql
@@ -83,6 +83,33 @@ SELECT * FROM pg_dist_colocation ORDER BY colocationid;
 
 -- Make sure that truncate trigger has been set for the MX table on worker
 SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;
+-- Make sure that start_metadata_sync_to_node considers foreign key constraints
+SET citus.shard_replication_factor TO 1;
+
+CREATE SCHEMA mx_testing_schema_2;
+
+CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3));
+CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
+    FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3));
+
+SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1');
+SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1');
+
+UPDATE
+    pg_dist_partition SET repmodel='s'
+WHERE
+    logicalrelid='mx_testing_schema.fk_test_1'::regclass
+    OR logicalrelid='mx_testing_schema_2.fk_test_2'::regclass;
+
+SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
+
+-- Check that foreign key metadata exists on the worker
+\c - - - :worker_1_port
+\d mx_testing_schema_2.fk_test_2
+\c - - - :master_port
+
+RESET citus.shard_replication_factor;
+
 -- Check that repeated calls to start_metadata_sync_to_node has no side effects
 \c - - - :master_port
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
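
Reviewer note, not part of the patch: two of the changes above follow from the same dependency argument, sketched below in SQL. A foreign key can only be added once both the referencing and the referenced table exist on the worker, so MetadataCreateCommands() now emits the snapshot in three phases. The ALTER TABLE form is an assumption about roughly what GetTableForeignConstraintCommands() produces, and the table names are borrowed from the regression test above.

    -- phase 1: create every MX table first, without any metadata
    CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE (col1, col3));
    CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text);

    -- phase 2: add foreign keys only after all tables exist,
    -- so both sides of every constraint are already present
    ALTER TABLE mx_testing_schema_2.fk_test_2
        ADD CONSTRAINT fk_test_2_col1_fkey
        FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3);

    -- phase 3: insert the Citus metadata (pg_dist_partition, pg_dist_shard, ...)

The same reasoning motivates the DROP_RESTRICT to DROP_CASCADE switch in worker_drop_distributed_table(): once workers carry foreign keys between distributed tables, dropping a referenced table with RESTRICT would fail, so the drop must cascade to the dependent constraints.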