From f88cc230bf43d6f933a41e19880b81c6273f8dce Mon Sep 17 00:00:00 2001 From: Burak Velioglu Date: Thu, 27 Jan 2022 22:41:29 +0300 Subject: [PATCH 01/28] Handle tables and objects as metadata. Update UDFs accordingly With this commit we've started to propagate sequences and shell tables within the object dependency resolution, so ensuring the dependencies of any object now considers shell tables and sequences as well. The separate logic for shell tables and sequences has been removed. Since shell table and sequence propagation was previously implemented as part of metadata handling, we used to propagate them while syncing table metadata. With this commit we've divided the metadata syncing logic (where metadata hereafter means anything except shards) into multiple parts and implemented it as part of ActivateNode. You can check the functions called in ActivateNode to see the definitions of the different kinds of metadata. Definitions of start_metadata_sync_to_node and citus_activate_node have also been updated. citus_activate_node will create an active node with all metadata and reference table shards. start_metadata_sync_to_node will be the same as citus_activate_node except for replicating reference tables. stop_metadata_sync_to_node will remove all the metadata. All of these UDFs must be called by a superuser. --- .../citus_add_local_table_to_metadata.c | 41 +- .../commands/create_distributed_table.c | 101 +--- .../distributed/commands/dependencies.c | 62 +- src/backend/distributed/commands/sequence.c | 76 +++ src/backend/distributed/commands/table.c | 29 +- src/backend/distributed/metadata/dependency.c | 40 +- .../distributed/metadata/metadata_sync.c | 556 +++++++----------- .../distributed/metadata/node_metadata.c | 332 +++++++++-- .../metadata/pg_get_object_address_12_13_14.c | 1 + .../distributed/operations/node_protocol.c | 28 +- .../distributed/operations/stage_protocol.c | 8 +- .../distributed/sql/citus--10.2-4--11.0-1.sql | 1 + .../sql/downgrades/citus--11.0-1--10.2-4.sql | 1 + .../udfs/worker_drop_shell_table/11.0-1.sql | 7 + .../udfs/worker_drop_shell_table/latest.sql | 7 + src/backend/distributed/test/metadata_sync.c | 64 +- src/backend/distributed/utils/maintenanced.c | 2 +- .../distributed/utils/reference_table_utils.c | 18 +- .../worker/worker_create_or_replace.c | 70 ++- .../worker/worker_data_fetch_protocol.c | 23 +- .../distributed/worker/worker_drop_protocol.c | 79 +++ src/include/distributed/commands.h | 3 + .../distributed/coordinator_protocol.h | 3 +- src/include/distributed/metadata_sync.h | 40 +- src/include/distributed/metadata_utility.h | 12 +- .../distributed/reference_table_utils.h | 2 +- .../distributed/worker_create_or_replace.h | 4 + src/include/distributed/worker_manager.h | 3 + .../expected/citus_local_tables_mx.out | 8 +- .../expected/distributed_functions.out | 14 - .../expected/failure_mx_metadata_sync.out | 87 +-- .../isolation_distributed_transaction_id.out | 5 +- ...lation_ensure_dependency_activate_node.out | 32 +- .../isolation_metadata_sync_deadlock.out | 139 ----- ...licate_reference_tables_to_coordinator.out | 17 +- .../expected/isolation_update_node.out | 2 +- .../regress/expected/local_shard_copy.out | 8 - .../local_shard_execution_dropped_column.out | 12 +- .../expected/master_copy_shard_placement.out | 2 + .../expected/multi_cluster_management.out | 49 +- .../expected/multi_colocation_utils.out | 35 +- src/test/regress/expected/multi_extension.out | 8 +- .../multi_fix_partition_shard_index_names.out | 26 +-
.../regress/expected/multi_metadata_sync.out | 471 ++++++++++----- .../expected/multi_mx_add_coordinator.out | 24 +- .../regress/expected/multi_partitioning.out | 6 +- .../expected/multi_sequence_default.out | 174 +++--- .../multi_unsupported_worker_operations.out | 5 - .../non_super_user_object_metadata.out | 42 +- .../resync_metadata_with_sequences.out | 6 +- .../expected/start_stop_metadata_sync.out | 78 +-- src/test/regress/expected/turn_mx_on.out | 6 + src/test/regress/expected/turn_mx_on_0.out | 6 + src/test/regress/expected/turn_mx_on_1.out | 6 + .../expected/upgrade_list_citus_objects.out | 3 +- .../input/multi_alter_table_statements.source | 20 + src/test/regress/input/multi_copy.source | 12 +- src/test/regress/isolation_schedule | 2 +- .../multi_alter_table_statements.source | 40 ++ src/test/regress/output/multi_copy.source | 10 +- .../isolation_distributed_transaction_id.spec | 7 +- .../isolation_metadata_sync_deadlock.spec | 25 - ...icate_reference_tables_to_coordinator.spec | 2 +- .../regress/spec/isolation_update_node.spec | 2 + .../regress/sql/citus_local_tables_mx.sql | 4 +- .../regress/sql/distributed_functions.sql | 12 - .../regress/sql/failure_mx_metadata_sync.sql | 36 +- .../local_shard_execution_dropped_column.sql | 5 +- .../sql/master_copy_shard_placement.sql | 2 + .../regress/sql/multi_cluster_management.sql | 12 +- .../regress/sql/multi_colocation_utils.sql | 20 +- src/test/regress/sql/multi_extension.sql | 3 +- .../multi_fix_partition_shard_index_names.sql | 2 +- src/test/regress/sql/multi_metadata_sync.sql | 99 ++-- .../regress/sql/multi_mx_add_coordinator.sql | 11 +- src/test/regress/sql/multi_partitioning.sql | 2 +- .../regress/sql/multi_sequence_default.sql | 93 +-- .../multi_unsupported_worker_operations.sql | 1 - .../sql/non_super_user_object_metadata.sql | 16 +- .../sql/resync_metadata_with_sequences.sql | 2 +- .../regress/sql/start_stop_metadata_sync.sql | 17 +- src/test/regress/sql/turn_mx_on.sql | 1 + 82 files changed, 1832 insertions(+), 1510 deletions(-) create mode 100644 src/backend/distributed/sql/udfs/worker_drop_shell_table/11.0-1.sql create mode 100644 src/backend/distributed/sql/udfs/worker_drop_shell_table/latest.sql diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index a73b06e38..f8c2a3042 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -30,6 +30,7 @@ #include "distributed/commands.h" #include "distributed/commands/sequence.h" #include "distributed/commands/utility_hook.h" +#include "distributed/metadata/distobject.h" #include "distributed/foreign_key_relationship.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" @@ -90,7 +91,7 @@ static void TransferSequenceOwnership(Oid ownedSequenceId, Oid targetRelationId, char *columnName); static void InsertMetadataForCitusLocalTable(Oid citusLocalTableId, uint64 shardId, bool autoConverted); -static void FinalizeCitusLocalTableCreation(Oid relationId, List *dependentSequenceList); +static void FinalizeCitusLocalTableCreation(Oid relationId); PG_FUNCTION_INFO_V1(citus_add_local_table_to_metadata); @@ -307,7 +308,13 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve ObjectAddressSet(tableAddress, RelationRelationId, relationId); /* - * Ensure dependencies first as we will create shell table on the other nodes + * Ensure that the 
sequences used in column defaults of the table + * have proper types + */ + EnsureRelationHasCompatibleSequenceTypes(relationId); + + /* + * Ensure dependencies exist as we will create shell table on the other nodes * in the MX case. */ EnsureDependenciesExistOnAllNodes(&tableAddress); @@ -354,18 +361,7 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve InsertMetadataForCitusLocalTable(shellRelationId, shardId, autoConverted); - /* - * Ensure that the sequences used in column defaults of the table - * have proper types - */ - List *attnumList = NIL; - List *dependentSequenceList = NIL; - GetDependentSequencesWithRelation(shellRelationId, &attnumList, - &dependentSequenceList, 0); - EnsureDistributedSequencesHaveOneType(shellRelationId, dependentSequenceList, - attnumList); - - FinalizeCitusLocalTableCreation(shellRelationId, dependentSequenceList); + FinalizeCitusLocalTableCreation(shellRelationId); } @@ -657,8 +653,10 @@ GetShellTableDDLEventsForCitusLocalTable(Oid relationId) */ IncludeSequenceDefaults includeSequenceDefaults = NEXTVAL_SEQUENCE_DEFAULTS; + bool creatingShellTableOnRemoteNode = false; List *tableDDLCommands = GetFullTableCreationCommands(relationId, - includeSequenceDefaults); + includeSequenceDefaults, + creatingShellTableOnRemoteNode); List *shellTableDDLEvents = NIL; TableDDLCommand *tableDDLCommand = NULL; @@ -1227,7 +1225,7 @@ InsertMetadataForCitusLocalTable(Oid citusLocalTableId, uint64 shardId, * sequences dependent with the table. */ static void -FinalizeCitusLocalTableCreation(Oid relationId, List *dependentSequenceList) +FinalizeCitusLocalTableCreation(Oid relationId) { /* * If it is a foreign table, then skip creating citus truncate trigger @@ -1240,16 +1238,7 @@ FinalizeCitusLocalTableCreation(Oid relationId, List *dependentSequenceList) if (ShouldSyncTableMetadata(relationId)) { - if (ClusterHasKnownMetadataWorkers()) - { - /* - * Ensure sequence dependencies and mark them as distributed - * before creating table metadata on workers - */ - MarkSequenceListDistributedAndPropagateWithDependencies(relationId, - dependentSequenceList); - } - CreateTableMetadataOnWorkers(relationId); + SyncCitusTableMetadata(relationId); } /* diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 870c78621..43bd1ebd6 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -113,8 +113,9 @@ static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMe static bool ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod, bool viaDeprecatedAPI); static void EnsureCitusTableCanBeCreated(Oid relationOid); -static void EnsureSequenceExistOnMetadataWorkersForRelation(Oid relationId, - Oid sequenceOid); +static void EnsureDistributedSequencesHaveOneType(Oid relationId, + List *dependentSequenceList, + List *attnumList); static List * GetFKeyCreationCommandsRelationInvolvedWithTableType(Oid relationId, int tableTypeFlag); static Oid DropFKeysAndUndistributeTable(Oid relationId); @@ -432,6 +433,12 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio DropFKeysRelationInvolvedWithTableType(relationId, INCLUDE_LOCAL_TABLES); } + /* + * Ensure that the sequences used in column defaults of the table + * have proper types + */ + EnsureRelationHasCompatibleSequenceTypes(relationId); + /* * distributed tables might have dependencies on 
different objects, since we create * shards for a distributed table via multiple sessions these objects will be created * via their own connection and committed immediately so they become visible to all sessions creating shards. */ EnsureDependenciesExistOnAllNodes(&tableAddress); @@ -446,7 +453,6 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio colocateWithTableName, viaDeprecatedAPI); - /* * Due to dropping columns, the parent's distribution key may not match the * partition's distribution key. The input distributionColumn belongs to @@ -494,16 +500,6 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio InsertIntoPgDistPartition(relationId, distributionMethod, distributionColumn, colocationId, replicationModel, autoConverted); - /* - * Ensure that the sequences used in column defaults of the table - * have proper types - */ - List *attnumList = NIL; - List *dependentSequenceList = NIL; - GetDependentSequencesWithRelation(relationId, &attnumList, &dependentSequenceList, 0); - EnsureDistributedSequencesHaveOneType(relationId, dependentSequenceList, - attnumList); - /* foreign tables do not support TRUNCATE trigger */ if (RegularTable(relationId)) { @@ -537,17 +533,7 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio if (ShouldSyncTableMetadata(relationId)) { - if (ClusterHasKnownMetadataWorkers()) - { - /* - * Ensure both sequence and its' dependencies and mark them as distributed - * before creating table metadata on workers - */ - MarkSequenceListDistributedAndPropagateWithDependencies(relationId, - dependentSequenceList); - } - - CreateTableMetadataOnWorkers(relationId); + SyncCitusTableMetadata(relationId); } /* @@ -605,11 +591,15 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio * If any other distributed table uses the input sequence, it checks whether * the types of the columns using the sequence match. If they don't, it errors out. * Otherwise, the condition is ensured. + * Since the owner of the sequence may not be distributed yet, it should be added + * explicitly. */ void -EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId) +EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId) { List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); + citusTableIdList = list_append_unique_oid(citusTableIdList, ownerRelationId); + Oid citusTableId = InvalidOid; foreach_oid(citusTableId, citusTableIdList) { @@ -676,59 +666,18 @@ AlterSequenceType(Oid seqOid, Oid typeOid) /* - * MarkSequenceListDistributedAndPropagateWithDependencies ensures sequences and their - * dependencies for the given sequence list exist on all nodes and marks them as distributed. + * EnsureRelationHasCompatibleSequenceTypes ensures that sequences used for columns + * of the table have compatible types both with the column type on that table and + * with the column types of all other distributed tables that use them. */ void -MarkSequenceListDistributedAndPropagateWithDependencies(Oid relationId, - List *sequenceList) +EnsureRelationHasCompatibleSequenceTypes(Oid relationId) { - Oid sequenceOid = InvalidOid; - foreach_oid(sequenceOid, sequenceList) - { - MarkSequenceDistributedAndPropagateWithDependencies(relationId, sequenceOid); - } -} - - -/* - * MarkSequenceDistributedAndPropagateWithDependencies ensures sequence and its' - * dependencies for the given sequence exist on all nodes and marks them as distributed.
- */ -void -MarkSequenceDistributedAndPropagateWithDependencies(Oid relationId, Oid sequenceOid) -{ - /* get sequence address */ - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid); - EnsureDependenciesExistOnAllNodes(&sequenceAddress); - EnsureSequenceExistOnMetadataWorkersForRelation(relationId, sequenceOid); - MarkObjectDistributed(&sequenceAddress); -} - - -/* - * EnsureSequenceExistOnMetadataWorkersForRelation ensures sequence for the given relation - * exist on each worker node with metadata. - */ -static void -EnsureSequenceExistOnMetadataWorkersForRelation(Oid relationId, Oid sequenceOid) -{ - Assert(ShouldSyncTableMetadata(relationId)); - - char *ownerName = TableOwner(relationId); - List *sequenceDDLList = DDLCommandsForSequence(sequenceOid, ownerName); - - /* prevent recursive propagation */ - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); - - const char *sequenceCommand = NULL; - foreach_ptr(sequenceCommand, sequenceDDLList) - { - SendCommandToWorkersWithMetadata(sequenceCommand); - } - - SendCommandToWorkersWithMetadata(ENABLE_DDL_PROPAGATION); + GetDependentSequencesWithRelation(relationId, &attnumList, &dependentSequenceList, 0); + EnsureDistributedSequencesHaveOneType(relationId, dependentSequenceList, attnumList); } @@ -737,7 +686,7 @@ EnsureSequenceExistOnMetadataWorkersForRelation(Oid relationId, Oid sequenceOid) * in which the sequence is used as default is supported for each sequence in input * dependentSequenceList, and then alters the sequence type if not the same with the column type. */ -void +static void EnsureDistributedSequencesHaveOneType(Oid relationId, List *dependentSequenceList, List *attnumList) { @@ -753,7 +702,7 @@ EnsureDistributedSequencesHaveOneType(Oid relationId, List *dependentSequenceLis * that sequence is supported */ Oid seqTypId = GetAttributeTypeOid(relationId, attnum); - EnsureSequenceTypeSupported(sequenceOid, seqTypId); + EnsureSequenceTypeSupported(sequenceOid, seqTypId, relationId); /* * Alter the sequence's data type in the coordinator if needed. 
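A minimal SQL sketch of the sequence-type check enforced above (table and sequence names are hypothetical, and the exact error wording may differ):

CREATE SEQUENCE user_id_seq;
CREATE TABLE users (id bigint DEFAULT nextval('user_id_seq'));
SELECT create_distributed_table('users', 'id');
-- the same sequence cannot back a column of a different type on another
-- distributed table, since a sequence can only be altered to one type
CREATE TABLE accounts (id int DEFAULT nextval('user_id_seq'));
SELECT create_distributed_table('accounts', 'id'); -- expected to error out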
diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index ed61ef71b..9d9839b5a 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -224,15 +224,47 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) { case OCLASS_CLASS: { + char relKind = get_rel_relkind(dependency->objectId); + /* * types have an intermediate dependency on a relation (aka class), so we do * support classes when the relkind is composite */ - if (get_rel_relkind(dependency->objectId) == RELKIND_COMPOSITE_TYPE) + if (relKind == RELKIND_COMPOSITE_TYPE) { return NIL; } + if (relKind == RELKIND_RELATION || relKind == RELKIND_PARTITIONED_TABLE || + relKind == RELKIND_FOREIGN_TABLE) + { + Oid relationId = dependency->objectId; + List *commandList = NIL; + + if (IsCitusTable(relationId)) + { + bool creatingShellTableOnRemoteNode = true; + List *tableDDLCommands = GetFullTableCreationCommands(relationId, + WORKER_NEXTVAL_SEQUENCE_DEFAULTS, + creatingShellTableOnRemoteNode); + TableDDLCommand *tableDDLCommand = NULL; + foreach_ptr(tableDDLCommand, tableDDLCommands) + { + Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); + commandList = lappend(commandList, GetTableDDLCommand( + tableDDLCommand)); + } + } + + return commandList; + } + + if (relKind == RELKIND_SEQUENCE) + { + char *sequenceOwnerName = TableOwner(dependency->objectId); + return DDLCommandsForSequence(dependency->objectId, sequenceOwnerName); + } + /* if this relation is not supported, break to the error at the end */ break; } @@ -316,14 +348,15 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) /* - * ReplicateAllDependenciesToNode replicate all previously marked objects to a worker - * node. The function also sets clusterHasDistributedFunction if there are any - * distributed functions. + * ReplicateAllObjectsToNodeCommandList returns commands to replicate all + * previously marked objects to a worker node. The function also sets + * clusterHasDistributedFunction if there are any distributed functions. 
 */ -void -ReplicateAllDependenciesToNode(const char *nodeName, int nodePort) +List * +ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort) { - List *ddlCommands = NIL; + /* since we are executing ddl commands disable propagation first, primarily for mx */ + List *ddlCommands = list_make1(DISABLE_DDL_PROPAGATION); /* * collect all dependencies in creation order and get their ddl commands */ @@ -360,21 +393,10 @@ ReplicateAllDependenciesToNode(const char *nodeName, int nodePort) ddlCommands = list_concat(ddlCommands, GetDependencyCreateDDLCommands(dependency)); } - if (list_length(ddlCommands) <= 0) - { - /* no commands to replicate dependencies to the new worker */ - return; - } - /* since we are executing ddl commands lets disable propagation, primarily for mx */ - ddlCommands = list_concat(list_make1(DISABLE_DDL_PROPAGATION), ddlCommands); + ddlCommands = lappend(ddlCommands, ENABLE_DDL_PROPAGATION); - /* send commands to new workers, the current user should a superuser */ - Assert(superuser()); - SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, - nodePort, - CurrentUserName(), - ddlCommands); + return ddlCommands; } diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index f1c2339a2..4aa04b2ef 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -11,6 +11,7 @@ #include "postgres.h" +#include "access/xact.h" #include "catalog/dependency.h" #include "catalog/namespace.h" #include "commands/defrem.h" @@ -23,6 +24,7 @@ #include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/worker_create_or_replace.h" #include "nodes/parsenodes.h" #include "utils/builtins.h" #include "utils/lsyscache.h" @@ -656,3 +658,77 @@ PostprocessAlterSequenceOwnerStmt(Node *node, const char *queryString) return NIL; } + + +/* + * GenerateBackupNameForSequenceCollision generates a new sequence name for an existing + * sequence. The name is generated in such a way that the new name doesn't overlap with + * an existing relation, by adding a suffix with an incrementing number after the new name. + */ +char * +GenerateBackupNameForSequenceCollision(const ObjectAddress *address) +{ + char *newName = palloc0(NAMEDATALEN); + char suffix[NAMEDATALEN] = { 0 }; + int count = 0; + char *namespaceName = get_namespace_name(get_rel_namespace(address->objectId)); + Oid schemaId = get_namespace_oid(namespaceName, false); + + char *baseName = get_rel_name(address->objectId); + int baseLength = strlen(baseName); + + while (true) + { + int suffixLength = SafeSnprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)", + count); + + /* trim the base name at the end to leave space for the suffix and trailing \0 */ + baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1); + + /* clear newName before copying the potentially trimmed baseName and suffix */ + memset(newName, 0, NAMEDATALEN); + strncpy_s(newName, NAMEDATALEN, baseName, baseLength); + strncpy_s(newName + baseLength, NAMEDATALEN - baseLength, suffix, + suffixLength); + + Oid newRelationId = get_relname_relid(newName, schemaId); + if (newRelationId == InvalidOid) + { + return newName; + } + + count++; + } +} + + +/* + * RenameExistingSequenceWithDifferentTypeIfExists renames the given sequence if it + * exists and its type is different from the desired sequence type.
 */ +void +RenameExistingSequenceWithDifferentTypeIfExists(RangeVar *sequence, Oid desiredSeqTypeId) +{ + Oid sequenceOid; + RangeVarGetAndCheckCreationNamespace(sequence, NoLock, &sequenceOid); + + if (OidIsValid(sequenceOid)) + { + Form_pg_sequence pgSequenceForm = pg_get_sequencedef(sequenceOid); + if (pgSequenceForm->seqtypid != desiredSeqTypeId) + { + ObjectAddress sequenceAddress = { 0 }; + ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid); + + char *newName = GenerateBackupNameForCollision(&sequenceAddress); + + RenameStmt *renameStmt = CreateRenameStatement(&sequenceAddress, newName); + const char *sqlRenameStmt = DeparseTreeNode((Node *) renameStmt); + ProcessUtilityParseTree((Node *) renameStmt, sqlRenameStmt, + PROCESS_UTILITY_QUERY, + NULL, None_Receiver, NULL); + + CommandCounterIncrement(); + } + } +} diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 26338570c..9236f0e1a 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -1955,6 +1955,12 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) return; } + /* + * Before ensuring each dependency exists, update dependent sequence + * types if necessary. + */ + EnsureRelationHasCompatibleSequenceTypes(relationId); + /* changing a relation could introduce new dependencies */ ObjectAddress tableAddress = { 0 }; ObjectAddressSet(tableAddress, RelationRelationId, relationId); @@ -2045,18 +2051,9 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) Oid seqOid = GetSequenceOid(relationId, attnum); if (seqOid != InvalidOid) { - EnsureDistributedSequencesHaveOneType(relationId, - list_make1_oid( - seqOid), - list_make1_int( - attnum)); - - if (ShouldSyncTableMetadata(relationId) && - ClusterHasKnownMetadataWorkers() + if (ShouldSyncTableMetadata(relationId)) { needMetadataSyncForNewSequences = true; - MarkSequenceDistributedAndPropagateWithDependencies( - relationId, seqOid); alterTableDefaultNextvalCmd = GetAddColumnWithNextvalDefaultCmd(seqOid, relationId, @@ -2088,16 +2085,9 @@ PostprocessAlterTableStmt(AlterTableStmt *alterTableStatement) Oid seqOid = GetSequenceOid(relationId, attnum); if (seqOid != InvalidOid) { - EnsureDistributedSequencesHaveOneType(relationId, - list_make1_oid(seqOid), - list_make1_int(attnum)); - - if (ShouldSyncTableMetadata(relationId) && - ClusterHasKnownMetadataWorkers()) + if (ShouldSyncTableMetadata(relationId)) { needMetadataSyncForNewSequences = true; - MarkSequenceDistributedAndPropagateWithDependencies(relationId, - seqOid); alterTableDefaultNextvalCmd = GetAlterColumnWithNextvalDefaultCmd( seqOid, relationId, command->name); } @@ -2627,8 +2617,7 @@ ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) * We currently don't support adding a serial column for an MX table * TODO: record the dependency in the workers */ - if (ShouldSyncTableMetadata(relationId) && - ClusterHasKnownMetadataWorkers()) + if (ShouldSyncTableMetadata(relationId)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg( diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 0118b6433..820cb848f 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -37,6 +37,7 @@ #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" #include
"distributed/version_compat.h" #include "miscadmin.h" #include "utils/fmgroids.h" @@ -120,6 +121,7 @@ typedef struct ViewDependencyNode }ViewDependencyNode; +static List * GetRelationSequenceDependencyList(Oid relationId); static List * GetRelationTriggerFunctionDependencyList(Oid relationId); static List * GetRelationStatsSchemaDependencyList(Oid relationId); static DependencyDefinition * CreateObjectAddressDependencyDef(Oid classId, Oid objectId); @@ -671,11 +673,20 @@ SupportedDependencyByCitus(const ObjectAddress *address) case OCLASS_CLASS: { + char relKind = get_rel_relkind(address->objectId); + /* * composite types have a reference to a relation of composite type, we need * to follow those to get the dependencies of type fields. + * + * As we also handle tables as objects as well, follow dependencies + * for tables. */ - if (get_rel_relkind(address->objectId) == RELKIND_COMPOSITE_TYPE) + if (relKind == RELKIND_COMPOSITE_TYPE || + relKind == RELKIND_RELATION || + relKind == RELKIND_PARTITIONED_TABLE || + relKind == RELKIND_FOREIGN_TABLE || + relKind == RELKIND_SEQUENCE) { return true; } @@ -985,6 +996,15 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe List *statisticsSchemaDependencyList = GetRelationStatsSchemaDependencyList(relationId); result = list_concat(result, statisticsSchemaDependencyList); + + /* + * Get the dependent sequences for tables (both as serial columns and + * columns have nextval with existing sequences) and expand dependency list + * with them. + */ + List *sequenceDependencyList = GetRelationSequenceDependencyList(relationId); + + result = list_concat(result, sequenceDependencyList); } default: @@ -997,6 +1017,24 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe } +/* + * GetRelationSequenceDependencyList returns the sequence dependency definition + * list for the given relation. + */ +static List * +GetRelationSequenceDependencyList(Oid relationId) +{ + List *attnumList = NIL; + List *dependentSequenceList = NIL; + + GetDependentSequencesWithRelation(relationId, &attnumList, &dependentSequenceList, 0); + List *sequenceDependencyDefList = + CreateObjectAddressDependencyDefList(RelationRelationId, dependentSequenceList); + + return sequenceDependencyDefList; +} + + /* * GetRelationStatsSchemaDependencyList returns a list of DependencyDefinition * objects for the schemas that statistics' of the relation with relationId depends. 
diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 325853a1f..46f7cbc3a 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -83,20 +83,15 @@ char *EnableManualMetadataChangesForUser = ""; -static void EnsureSequentialModeMetadataOperations(void); -static List * DistributedObjectMetadataSyncCommandList(void); -static List * GetDistributedTableDDLEvents(Oid relationId); static void EnsureObjectMetadataIsSane(int distributionArgumentIndex, int colocationId); -static char * LocalGroupIdUpdateCommand(int32 groupId); -static List * SequenceDependencyCommandList(Oid relationId); -static char * TruncateTriggerCreateCommand(Oid relationId); static char * SchemaOwnerName(Oid objectId); static bool HasMetadataWorkers(void); -static List * DetachPartitionCommandList(void); +static void CreateShellTableOnWorkers(Oid relationId); +static void CreateTableMetadataOnWorkers(Oid relationId); static bool ShouldSyncTableMetadataInternal(bool hashDistributed, bool citusTableWithNoDistKey); -static bool SyncMetadataSnapshotToNode(WorkerNode *workerNode, bool raiseOnError); +static bool SyncNodeMetadataSnapshotToNode(WorkerNode *workerNode, bool raiseOnError); static void DropMetadataSnapshotOnNode(WorkerNode *workerNode); static char * CreateSequenceDependencyCommand(Oid relationId, Oid sequenceId, char *columnName); @@ -111,6 +106,7 @@ static RoleSpec * GetRoleSpecObjectForGrantStmt(Oid roleOid); static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem); static void SetLocalEnableDependencyCreation(bool state); +static void SetLocalReplicateReferenceTablesOnActivate(bool state); static char * GenerateSetRoleQuery(Oid roleOid); static void MetadataSyncSigTermHandler(SIGNAL_ARGS); static void MetadataSyncSigAlrmHandler(SIGNAL_ARGS); @@ -155,7 +151,7 @@ static bool got_SIGALRM = false; /* * start_metadata_sync_to_node function sets hasmetadata column of the given - * node to true, and then synchronizes the metadata on the node. + * node to true, and then activates the node without replicating reference tables. */ Datum start_metadata_sync_to_node(PG_FUNCTION_ARGS) { CheckCitusVersion(ERROR); text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); + EnsureSuperUser(); + EnsureCoordinator(); + char *nodeNameString = text_to_cstring(nodeName); - StartMetadataSyncToNode(nodeNameString, nodePort); + bool prevReplicateRefTablesOnActivate = ReplicateReferenceTablesOnActivate; + SetLocalReplicateReferenceTablesOnActivate(false); + + ActivateNode(nodeNameString, nodePort); + TransactionModifiedNodeMetadata = true; + + SetLocalReplicateReferenceTablesOnActivate(prevReplicateRefTablesOnActivate); PG_RETURN_VOID(); } /* - * StartMetadataSyncToNode is the internal API for + * SyncNodeMetadataToNode is the internal API for * start_metadata_sync_to_node().
 */ void -StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort) +SyncNodeMetadataToNode(const char *nodeNameString, int32 nodePort) { char *escapedNodeName = quote_literal_cstr(nodeNameString); @@ -244,7 +249,29 @@ /* fail if metadata synchronization doesn't succeed */ bool raiseInterrupts = true; - SyncMetadataSnapshotToNode(workerNode, raiseInterrupts); + SyncNodeMetadataSnapshotToNode(workerNode, raiseInterrupts); +} + + +/* + * SyncCitusTableMetadata syncs citus table metadata to worker nodes with metadata. + * Our definition of metadata includes the shell table and its relationships with + * other shell tables, as well as the corresponding pg_dist_object, pg_dist_partition, + * pg_dist_shard and pg_dist_placement entries. + */ +void +SyncCitusTableMetadata(Oid relationId) +{ + CreateShellTableOnWorkers(relationId); + CreateTableMetadataOnWorkers(relationId); + CreateInterTableRelationshipOfRelationOnWorkers(relationId); + + if (!IsTableOwnedByExtension(relationId)) + { + ObjectAddress relationAddress = { 0 }; + ObjectAddressSet(relationAddress, RelationRelationId, relationId); + MarkObjectDistributed(&relationAddress); + } } /* @@ -260,7 +287,7 @@ * visible on all connections used by the transaction, meaning we can only use 1 * connection per node. */ -static void +void EnsureSequentialModeMetadataOperations(void) { if (!IsTransactionBlock()) { @@ -302,6 +329,7 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS) { CheckCitusVersion(ERROR); EnsureCoordinator(); + EnsureSuperUser(); text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); @@ -350,6 +378,8 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS) workerNode = SetWorkerColumn(workerNode, Anum_pg_dist_node_metadatasynced, BoolGetDatum(false)); + TransactionModifiedNodeMetadata = true; + PG_RETURN_VOID(); } @@ -443,25 +473,25 @@ ShouldSyncTableMetadataInternal(bool hashDistributed, bool citusTableWithNoDistK /* - * SyncMetadataSnapshotToNode does the following: + * SyncNodeMetadataSnapshotToNode does the following: * 1. Sets the localGroupId on the worker so the worker knows which tuple in * pg_dist_node represents itself. - * 2. Recreates the distributed metadata on the given worker. + * 2. Recreates the node metadata on the given worker. * If raiseOnError is true, it errors out if synchronization fails.
 */ static bool -SyncMetadataSnapshotToNode(WorkerNode *workerNode, bool raiseOnError) +SyncNodeMetadataSnapshotToNode(WorkerNode *workerNode, bool raiseOnError) { char *currentUser = CurrentUserName(); /* generate and add the local group id's update query */ char *localGroupIdUpdateCommand = LocalGroupIdUpdateCommand(workerNode->groupId); - /* generate the queries which drop the metadata */ - List *dropMetadataCommandList = MetadataDropCommands(); + /* generate the queries which drop the node metadata */ + List *dropMetadataCommandList = NodeMetadataDropCommands(); - /* generate the queries which create the metadata from scratch */ - List *createMetadataCommandList = MetadataCreateCommands(); + /* generate the queries which create the node metadata from scratch */ + List *createMetadataCommandList = NodeMetadataCreateCommands(); List *recreateMetadataSnapshotCommandList = list_make1(localGroupIdUpdateCommand); recreateMetadataSnapshotCommandList = list_concat(recreateMetadataSnapshotCommandList, @@ -506,12 +536,28 @@ DropMetadataSnapshotOnNode(WorkerNode *workerNode) char *userName = CurrentUserName(); - /* generate the queries which drop the metadata */ - List *dropMetadataCommandList = MetadataDropCommands(); - + /* + * Detach partitions, break dependencies between sequences and tables, + * then remove shell tables first. + */ + List *dropMetadataCommandList = DetachPartitionCommandList(); + dropMetadataCommandList = lappend(dropMetadataCommandList, + BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND); + dropMetadataCommandList = lappend(dropMetadataCommandList, + REMOVE_ALL_SHELL_TABLES_COMMAND); + dropMetadataCommandList = list_concat(dropMetadataCommandList, + NodeMetadataDropCommands()); dropMetadataCommandList = lappend(dropMetadataCommandList, LocalGroupIdUpdateCommand(0)); + /* remove all dist table and object/table related metadata afterwards */ + dropMetadataCommandList = lappend(dropMetadataCommandList, DELETE_ALL_PARTITIONS); + dropMetadataCommandList = lappend(dropMetadataCommandList, DELETE_ALL_SHARDS); + dropMetadataCommandList = lappend(dropMetadataCommandList, DELETE_ALL_PLACEMENTS); + dropMetadataCommandList = lappend(dropMetadataCommandList, + DELETE_ALL_DISTRIBUTED_OBJECTS); + + Assert(superuser()); SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction( workerNode->workerName, workerNode->workerPort, @@ -521,28 +567,19 @@ /* - * MetadataCreateCommands returns list of queries that are + * NodeMetadataCreateCommands returns the list of queries that are * required to create the current metadata snapshot of the node that the * function is called. The metadata snapshot commands includes the * following queries: * * (i) Query that populates pg_dist_node table - * (ii) Queries that create the clustered tables (including foreign keys, - * partitioning hierarchy etc.)
- * (iii) Queries that populate pg_dist_partition table referenced by (ii) - * (iv) Queries that populate pg_dist_shard table referenced by (iii) - * (v) Queries that populate pg_dist_placement table referenced by (iv) - * (vi) Queries that populate pg_dist_object table */ List * -MetadataCreateCommands(void) +NodeMetadataCreateCommands(void) { List *metadataSnapshotCommandList = NIL; - List *distributedTableList = CitusTableList(); - List *propagatedTableList = NIL; bool includeNodesFromOtherClusters = true; List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters); - IncludeSequenceDefaults includeSequenceDefaults = WORKER_NEXTVAL_SEQUENCE_DEFAULTS; /* make sure we have deterministic output for our tests */ workerNodeList = SortList(workerNodeList, CompareWorkerNodes); @@ -552,170 +589,6 @@ MetadataCreateCommands(void) metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, nodeListInsertCommand); - /* create the list of tables whose metadata will be created */ - CitusTableCacheEntry *cacheEntry = NULL; - foreach_ptr(cacheEntry, distributedTableList) - { - if (ShouldSyncTableMetadata(cacheEntry->relationId)) - { - propagatedTableList = lappend(propagatedTableList, cacheEntry); - } - } - - /* create the tables, but not the metadata */ - foreach_ptr(cacheEntry, propagatedTableList) - { - Oid relationId = cacheEntry->relationId; - ObjectAddress tableAddress = { 0 }; - - if (IsTableOwnedByExtension(relationId)) - { - /* skip table creation when the Citus table is owned by an extension */ - continue; - } - - List *ddlCommandList = GetFullTableCreationCommands(relationId, - includeSequenceDefaults); - char *tableOwnerResetCommand = TableOwnerResetCommand(relationId); - - /* - * Tables might have dependencies on different objects, since we create shards for - * table via multiple sessions these objects will be created via their own connection - * and committed immediately so they become visible to all sessions creating shards. - */ - ObjectAddressSet(tableAddress, RelationRelationId, relationId); - - /* - * Set object propagation to off as we will mark objects distributed - * at the end of this function. - */ - bool prevDependencyCreationValue = EnableDependencyCreation; - SetLocalEnableDependencyCreation(false); - - EnsureDependenciesExistOnAllNodes(&tableAddress); - - /* - * Ensure sequence dependencies and mark them as distributed - */ - List *attnumList = NIL; - List *dependentSequenceList = NIL; - GetDependentSequencesWithRelation(relationId, &attnumList, - &dependentSequenceList, 0); - - Oid sequenceOid = InvalidOid; - foreach_oid(sequenceOid, dependentSequenceList) - { - ObjectAddress sequenceAddress = { 0 }; - ObjectAddressSet(sequenceAddress, RelationRelationId, sequenceOid); - EnsureDependenciesExistOnAllNodes(&sequenceAddress); - - /* - * Sequences are not marked as distributed while creating table - * if no metadata worker node exists. We are marking all sequences - * distributed while syncing metadata in such case. 
- */ - MarkObjectDistributed(&sequenceAddress); - } - - SetLocalEnableDependencyCreation(prevDependencyCreationValue); - - List *workerSequenceDDLCommands = SequenceDDLCommandsForTable(relationId); - metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, - workerSequenceDDLCommands); - - /* ddlCommandList contains TableDDLCommand information, need to materialize */ - TableDDLCommand *tableDDLCommand = NULL; - foreach_ptr(tableDDLCommand, ddlCommandList) - { - Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); - metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, - GetTableDDLCommand(tableDDLCommand)); - } - - metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, - tableOwnerResetCommand); - - List *sequenceDependencyCommandList = SequenceDependencyCommandList( - relationId); - metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, - sequenceDependencyCommandList); - } - - /* construct the foreign key constraints after all tables are created */ - foreach_ptr(cacheEntry, propagatedTableList) - { - Oid relationId = cacheEntry->relationId; - - if (IsTableOwnedByExtension(relationId)) - { - /* skip foreign key creation when the Citus table is owned by an extension */ - continue; - } - - List *foreignConstraintCommands = - GetReferencingForeignConstaintCommands(relationId); - - metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, - foreignConstraintCommands); - } - - /* construct partitioning hierarchy after all tables are created */ - foreach_ptr(cacheEntry, propagatedTableList) - { - Oid relationId = cacheEntry->relationId; - - if (IsTableOwnedByExtension(relationId)) - { - /* skip partition creation when the Citus table is owned by an extension */ - continue; - } - - if (PartitionTable(relationId)) - { - char *alterTableAttachPartitionCommands = - GenerateAlterTableAttachPartitionCommand(relationId); - - metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, - alterTableAttachPartitionCommands); - } - } - - /* after all tables are created, create the metadata */ - foreach_ptr(cacheEntry, propagatedTableList) - { - Oid relationId = cacheEntry->relationId; - - /* add the table metadata command first*/ - char *metadataCommand = DistributionCreateCommand(cacheEntry); - metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, - metadataCommand); - - if (!IsForeignTable(relationId)) - { - /* add the truncate trigger command after the table became distributed */ - char *truncateTriggerCreateCommand = - TruncateTriggerCreateCommand(cacheEntry->relationId); - metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, - truncateTriggerCreateCommand); - } - - /* add the pg_dist_shard{,placement} entries */ - List *shardIntervalList = LoadShardIntervalList(relationId); - List *shardCreateCommandList = ShardListInsertCommand(shardIntervalList); - - metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, - shardCreateCommandList); - } - - /* As the last step, propagate the pg_dist_object entities */ - if (ShouldPropagate()) - { - List *distributedObjectSyncCommandList = - DistributedObjectMetadataSyncCommandList(); - metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, - distributedObjectSyncCommandList); - } - return metadataSnapshotCommandList; } @@ -724,7 +597,7 @@ MetadataCreateCommands(void) * DistributedObjectMetadataSyncCommandList returns the necessary commands to create * pg_dist_object entries on the new node. 
 */ -static List * +List * DistributedObjectMetadataSyncCommandList(void) { HeapTuple pgDistObjectTup = NULL; @@ -829,118 +702,42 @@ /* - * GetDistributedTableDDLEvents returns the full set of DDL commands necessary to - * create the given distributed table on a worker. The list includes setting up any - * sequences, setting the owner of the table, inserting table and shard metadata, - * setting the truncate trigger and foreign key constraints. + * CitusTableMetadataCreateCommandList returns the set of commands necessary to + * create the metadata of the given distributed table on a worker. */ -static List * -GetDistributedTableDDLEvents(Oid relationId) +List * +CitusTableMetadataCreateCommandList(Oid relationId) { CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry(relationId); List *commandList = NIL; - IncludeSequenceDefaults includeSequenceDefaults = WORKER_NEXTVAL_SEQUENCE_DEFAULTS; - - /* if the table is owned by an extension we only propagate pg_dist_* records */ - bool tableOwnedByExtension = IsTableOwnedByExtension(relationId); - if (!tableOwnedByExtension) - { - /* - * Commands to create the table, these commands are TableDDLCommands so lets - * materialize to the non-sharded version - */ - List *tableDDLCommands = GetFullTableCreationCommands(relationId, - includeSequenceDefaults); - TableDDLCommand *tableDDLCommand = NULL; - foreach_ptr(tableDDLCommand, tableDDLCommands) - { - Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); - commandList = lappend(commandList, GetTableDDLCommand(tableDDLCommand)); - } - - /* command to associate sequences with table */ - List *sequenceDependencyCommandList = SequenceDependencyCommandList( - relationId); - commandList = list_concat(commandList, sequenceDependencyCommandList); - } /* command to insert pg_dist_partition entry */ char *metadataCommand = DistributionCreateCommand(cacheEntry); commandList = lappend(commandList, metadataCommand); - /* commands to create the truncate trigger of the table */ - if (!IsForeignTable(relationId)) - { - char *truncateTriggerCreateCommand = TruncateTriggerCreateCommand(relationId); - commandList = lappend(commandList, truncateTriggerCreateCommand); - } - /* commands to insert pg_dist_shard & pg_dist_placement entries */ List *shardIntervalList = LoadShardIntervalList(relationId); List *shardMetadataInsertCommandList = ShardListInsertCommand(shardIntervalList); commandList = list_concat(commandList, shardMetadataInsertCommandList); - if (!tableOwnedByExtension) - { - /* commands to create foreign key constraints */ - List *foreignConstraintCommands = - GetReferencingForeignConstaintCommands(relationId); - commandList = list_concat(commandList, foreignConstraintCommands); - - /* commands to create partitioning hierarchy */ - if (PartitionTable(relationId)) - { - char *alterTableAttachPartitionCommands = - GenerateAlterTableAttachPartitionCommand(relationId); - commandList = lappend(commandList, alterTableAttachPartitionCommands); - } - } - return commandList; } /* - * MetadataDropCommands returns list of queries that are required to - * drop all the metadata of the node that are related to clustered tables. + * NodeMetadataDropCommands returns the list of queries that are required to + * drop all the node metadata that is not related to clustered tables.
* The drop metadata snapshot commands includes the following queries: * - * (i) Query to disable DDL propagation (necessary for (ii) - * (ii) Queries that DETACH all partitions of distributed tables - * (iii) Queries that delete all the rows from pg_dist_node table - * (iv) Queries that drop the clustered tables and remove its references from - * the pg_dist_partition. Note that distributed relation ids are gathered - * from the worker itself to prevent dropping any non-distributed tables - * with the same name. - * (v) Queries that delete all the rows from pg_dist_shard table referenced by (iv) - * (vi) Queries that delete all the rows from pg_dist_placement table - * referenced by (v) - * (vii) Queries that delete all the rows from pg_dist_object table + * (i) Queries that delete all the rows from pg_dist_node table */ List * -MetadataDropCommands(void) +NodeMetadataDropCommands(void) { List *dropSnapshotCommandList = NIL; - List *detachPartitionCommandList = DetachPartitionCommandList(); - - dropSnapshotCommandList = list_concat(dropSnapshotCommandList, - detachPartitionCommandList); - - /* - * We are re-creating the metadata, so not lose track of the - * sequences by preventing them dropped via DROP TABLE. - */ - dropSnapshotCommandList = - lappend(dropSnapshotCommandList, - BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND); - - dropSnapshotCommandList = lappend(dropSnapshotCommandList, - REMOVE_ALL_CITUS_TABLES_COMMAND); dropSnapshotCommandList = lappend(dropSnapshotCommandList, DELETE_ALL_NODES); - dropSnapshotCommandList = lappend(dropSnapshotCommandList, - DELETE_ALL_DISTRIBUTED_OBJECTS); return dropSnapshotCommandList; } @@ -1530,7 +1327,7 @@ PlacementUpsertCommand(uint64 shardId, uint64 placementId, int shardState, * LocalGroupIdUpdateCommand creates the SQL command required to set the local group id * of a worker and returns the command in a string. */ -static char * +char * LocalGroupIdUpdateCommand(int32 groupId) { StringInfo updateCommand = makeStringInfo(); @@ -1542,36 +1339,6 @@ LocalGroupIdUpdateCommand(int32 groupId) } -/* - * SequenceDDLCommandsForTable returns a list of commands which create sequences (and - * their schemas) to run on workers before creating the relation. The sequence creation - * commands are wrapped with a `worker_apply_sequence_command` call, which sets the - * sequence space uniquely for each worker. Notice that this function is relevant only - * during metadata propagation to workers and adds nothing to the list of sequence - * commands if none of the workers is marked as receiving metadata changes. - */ -List * -SequenceDDLCommandsForTable(Oid relationId) -{ - List *allSequencesDDLList = NIL; - - List *attnumList = NIL; - List *dependentSequenceList = NIL; - GetDependentSequencesWithRelation(relationId, &attnumList, &dependentSequenceList, 0); - - char *ownerName = TableOwner(relationId); - - Oid sequenceOid = InvalidOid; - foreach_oid(sequenceOid, dependentSequenceList) - { - List *sequenceDDLCommands = DDLCommandsForSequence(sequenceOid, ownerName); - allSequencesDDLList = list_concat(allSequencesDDLList, sequenceDDLCommands); - } - - return allSequencesDDLList; -} - - /* * DDLCommandsForSequence returns the DDL commands needs to be run to create the * sequence and alter the owner to the given owner name. @@ -1785,7 +1552,7 @@ GetSequencesFromAttrDef(Oid attrdefOid) * necessary to ensure that the sequence is dropped when the table is * dropped. 
 */ -static List * +List * SequenceDependencyCommandList(Oid relationId) { List *sequenceCommandList = NIL; @@ -1815,7 +1582,8 @@ SequenceDependencyCommandList(Oid relationId) CreateSequenceDependencyCommand(relationId, sequenceId, columnName); sequenceCommandList = lappend(sequenceCommandList, - sequenceDependencyCommand); + makeTableDDLCommandString( + sequenceDependencyCommand)); } return sequenceCommandList; @@ -2090,6 +1858,20 @@ SetLocalEnableDependencyCreation(bool state) } +/* + * SetLocalReplicateReferenceTablesOnActivate sets the + * replicate_reference_tables_on_activate locally + */ +void +SetLocalReplicateReferenceTablesOnActivate(bool state) +{ + set_config_option("citus.replicate_reference_tables_on_activate", + state == true ? "on" : "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); +} + + static char * GenerateSetRoleQuery(Oid roleOid) { @@ -2104,7 +1886,7 @@ * TruncateTriggerCreateCommand creates a SQL query calling worker_create_truncate_trigger * function, which creates the truncate trigger on the worker. */ -static char * +TableDDLCommand * TruncateTriggerCreateCommand(Oid relationId) { StringInfo triggerCreateCommand = makeStringInfo(); @@ -2114,7 +1896,10 @@ "SELECT worker_create_truncate_trigger(%s)", quote_literal_cstr(tableName)); - return triggerCreateCommand->data; + TableDDLCommand *triggerDDLCommand = makeTableDDLCommandString( + triggerCreateCommand->data); + + return triggerDDLCommand; } @@ -2168,16 +1953,101 @@ HasMetadataWorkers(void) /* - * CreateTableMetadataOnWorkers creates the list of commands needed to create the - * given distributed table and sends these commands to all metadata workers i.e. workers - * with hasmetadata=true. Before sending the commands, in order to prevent recursive - * propagation, DDL propagation on workers are disabled with a - * `SET citus.enable_ddl_propagation TO off;` command. + * CreateInterTableRelationshipOfRelationOnWorkers creates the inter-table relationships + * for the given relation id on each worker node with metadata. */ void +CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId) +{ + /* if the table is owned by an extension we don't create the relationships */ + bool tableOwnedByExtension = IsTableOwnedByExtension(relationId); + if (tableOwnedByExtension) + { + return; + } + + List *commandList = + InterTableRelationshipOfRelationCommandList(relationId); + + /* prevent recursive propagation */ + SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + + const char *command = NULL; + foreach_ptr(command, commandList) + { + SendCommandToWorkersWithMetadata(command); + } +} + + +/* + * InterTableRelationshipOfRelationCommandList returns the command list to create + * the inter-table relationships for the given relation. + */ +List * +InterTableRelationshipOfRelationCommandList(Oid relationId) +{ + /* commands to create foreign key constraints */ + List *commandList = GetReferencingForeignConstaintCommands(relationId); + + /* commands to create partitioning hierarchy */ + if (PartitionTable(relationId)) + { + char *alterTableAttachPartitionCommands = + GenerateAlterTableAttachPartitionCommand(relationId); + commandList = lappend(commandList, alterTableAttachPartitionCommands); + } + + return commandList; +} + + +/* + * CreateShellTableOnWorkers creates the shell table on each worker node with metadata, + * including sequence dependencies and truncate triggers.
 */ +static void +CreateShellTableOnWorkers(Oid relationId) +{ + if (IsTableOwnedByExtension(relationId)) + { + return; + } + + List *commandList = list_make1(DISABLE_DDL_PROPAGATION); + + IncludeSequenceDefaults includeSequenceDefaults = WORKER_NEXTVAL_SEQUENCE_DEFAULTS; + bool creatingShellTableOnRemoteNode = true; + List *tableDDLCommands = GetFullTableCreationCommands(relationId, + includeSequenceDefaults, + creatingShellTableOnRemoteNode); + + TableDDLCommand *tableDDLCommand = NULL; + foreach_ptr(tableDDLCommand, tableDDLCommands) + { + Assert(CitusIsA(tableDDLCommand, TableDDLCommand)); + commandList = lappend(commandList, GetTableDDLCommand(tableDDLCommand)); + } + + const char *command = NULL; + foreach_ptr(command, commandList) + { + SendCommandToWorkersWithMetadata(command); + } +} + + +/* + * CreateTableMetadataOnWorkers creates the list of commands needed to create the + * metadata of the given distributed table and sends these commands to all metadata + * workers, i.e. workers with hasmetadata=true. Before sending the commands, in order + * to prevent recursive propagation, DDL propagation on workers is disabled with a + * `SET citus.enable_ddl_propagation TO off;` command. + */ +static void CreateTableMetadataOnWorkers(Oid relationId) { - List *commandList = GetDistributedTableDDLEvents(relationId); + List *commandList = CitusTableMetadataCreateCommandList(relationId); /* prevent recursive propagation */ SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); @@ -2200,7 +2070,7 @@ * an extra step, if there are no partitions to DETACH, this function simply returns * empty list to not disable/enable DDL propagation for nothing. */ -static List * +List * DetachPartitionCommandList(void) { List *detachPartitionCommandList = NIL; @@ -2242,7 +2112,7 @@ /* - * SyncMetadataToNodes tries recreating the metadata snapshot in the + * SyncNodeMetadataToNodes tries recreating the metadata snapshot in the * metadata workers that are out of sync. Returns the result of * synchronization. * * since updates on the pg_dist_node metadata must be rollbacked if anything * goes wrong.
*/ -static MetadataSyncResult -SyncMetadataToNodes(void) +static NodeMetadataSyncResult +SyncNodeMetadataToNodes(void) { - MetadataSyncResult result = METADATA_SYNC_SUCCESS; + NodeMetadataSyncResult result = NODE_METADATA_SYNC_SUCCESS; if (!IsCoordinator()) { - return METADATA_SYNC_SUCCESS; + return NODE_METADATA_SYNC_SUCCESS; } /* @@ -2266,7 +2136,7 @@ SyncMetadataToNodes(void) */ if (!ConditionalLockRelationOid(DistNodeRelationId(), RowExclusiveLock)) { - return METADATA_SYNC_FAILED_LOCK; + return NODE_METADATA_SYNC_FAILED_LOCK; } List *syncedWorkerList = NIL; @@ -2277,12 +2147,12 @@ SyncMetadataToNodes(void) if (workerNode->hasMetadata && !workerNode->metadataSynced) { bool raiseInterrupts = false; - if (!SyncMetadataSnapshotToNode(workerNode, raiseInterrupts)) + if (!SyncNodeMetadataSnapshotToNode(workerNode, raiseInterrupts)) { ereport(WARNING, (errmsg("failed to sync metadata to %s:%d", workerNode->workerName, workerNode->workerPort))); - result = METADATA_SYNC_FAILED_SYNC; + result = NODE_METADATA_SYNC_FAILED_SYNC; } else { @@ -2303,7 +2173,7 @@ SyncMetadataToNodes(void) if (!nodeUpdated->metadataSynced) { /* set the result to FAILED to trigger the sync again */ - result = METADATA_SYNC_FAILED_SYNC; + result = NODE_METADATA_SYNC_FAILED_SYNC; } } @@ -2312,11 +2182,11 @@ SyncMetadataToNodes(void) /* - * SyncMetadataToNodesMain is the main function for syncing metadata to + * SyncNodeMetadataToNodesMain is the main function for syncing node metadata to * MX nodes. It retries until success and then exits. */ void -SyncMetadataToNodesMain(Datum main_arg) +SyncNodeMetadataToNodesMain(Datum main_arg) { Oid databaseOid = DatumGetObjectId(main_arg); @@ -2357,11 +2227,11 @@ SyncMetadataToNodesMain(Datum main_arg) { UseCoordinatedTransaction(); - MetadataSyncResult result = SyncMetadataToNodes(); - syncedAllNodes = (result == METADATA_SYNC_SUCCESS); + NodeMetadataSyncResult result = SyncNodeMetadataToNodes(); + syncedAllNodes = (result == NODE_METADATA_SYNC_SUCCESS); /* we use LISTEN/NOTIFY to wait for metadata syncing in tests */ - if (result != METADATA_SYNC_FAILED_LOCK) + if (result != NODE_METADATA_SYNC_FAILED_LOCK) { Async_Notify(METADATA_SYNC_CHANNEL, NULL); } @@ -2445,11 +2315,11 @@ MetadataSyncSigAlrmHandler(SIGNAL_ARGS) /* - * SpawnSyncMetadataToNodes starts a background worker which runs metadata + * SpawnSyncNodeMetadataToNodes starts a background worker which runs node metadata * sync. On success it returns workers' handle. Otherwise it returns NULL. 
 */
BackgroundWorkerHandle *
-SpawnSyncMetadataToNodes(Oid database, Oid extensionOwner)
+SpawnSyncNodeMetadataToNodes(Oid database, Oid extensionOwner)
{
	BackgroundWorker worker;
	BackgroundWorkerHandle *handle = NULL;
@@ -2467,7 +2337,7 @@ SpawnSyncMetadataToNodes(Oid database, Oid extensionOwner)
	worker.bgw_restart_time = BGW_NEVER_RESTART;
	strcpy_s(worker.bgw_library_name, sizeof(worker.bgw_library_name), "citus");
	strcpy_s(worker.bgw_function_name, sizeof(worker.bgw_library_name),
-			"SyncMetadataToNodesMain");
+			"SyncNodeMetadataToNodesMain");
	worker.bgw_main_arg = ObjectIdGetDatum(MyDatabaseId);
	memcpy_s(worker.bgw_extra, sizeof(worker.bgw_extra), &extensionOwner,
			 sizeof(Oid));
diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c
index 63ac316a8..b5423a8a2 100644
--- a/src/backend/distributed/metadata/node_metadata.c
+++ b/src/backend/distributed/metadata/node_metadata.c
@@ -40,8 +40,10 @@
 #include "distributed/remote_commands.h"
 #include "distributed/resource_lock.h"
 #include "distributed/shardinterval_utils.h"
+#include "distributed/multi_partitioning_utils.h"
 #include "distributed/shared_connection_stats.h"
 #include "distributed/string_utils.h"
+#include "distributed/metadata/pg_dist_object.h"
 #include "distributed/transaction_recovery.h"
 #include "distributed/version_compat.h"
 #include "distributed/worker_manager.h"
@@ -90,7 +92,6 @@ typedef struct NodeMetadata
 } NodeMetadata;
 
 /* local function forward declarations */
-static int ActivateNode(char *nodeName, int nodePort);
 static void RemoveNodeFromCluster(char *nodeName, int32 nodePort);
 static void ErrorIfNodeContainsNonRemovablePlacements(WorkerNode *workerNode);
 static bool PlacementHasActivePlacementOnAnotherGroup(GroupShardPlacement
@@ -105,9 +106,12 @@ static void InsertPlaceholderCoordinatorRecord(void);
 static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport,
						  NodeMetadata *nodeMetadata);
 static void DeleteNodeRow(char *nodename, int32 nodeport);
-static void SetUpDistributedTableDependencies(WorkerNode *workerNode);
+static void SyncDistributedObjectsToNode(WorkerNode *workerNode);
+static void UpdateLocalGroupIdOnNode(WorkerNode *workerNode);
+static void SyncPgDistTableMetadataToNode(WorkerNode *workerNode);
+static List * InterTableRelationshipCommandList();
 static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple);
-static void PropagateNodeWideObjects(WorkerNode *newWorkerNode);
+static List * PropagateNodeWideObjectsCommandList();
 static WorkerNode * ModifiableWorkerNode(const char *nodeName, int32 nodePort);
 static bool NodeIsLocal(WorkerNode *worker);
 static void SetLockTimeoutLocally(int32 lock_cooldown);
@@ -573,54 +577,118 @@ master_set_node_property(PG_FUNCTION_ARGS)
 
 
 /*
- * SetUpDistributedTableDependencies sets up up the following on a node if it's
- * a primary node that currently stores data:
- * - All dependencies (e.g., types, schemas)
- * - Reference tables, because they are needed to handle queries efficiently.
- * - Distributed functions
+ * InterTableRelationshipCommandList returns the command list to
+ * set up the inter-table relationships, including
 *
- * Note that we do not create the distributed dependencies on the coordinator
- * since all the dependencies should be present in the coordinator already.
+ * (i) Foreign keys
+ * (ii) Partitioning hierarchy
+ *
+ * for each citus table.
 */
-static void
-SetUpDistributedTableDependencies(WorkerNode *newWorkerNode)
+static List *
+InterTableRelationshipCommandList()
 {
-	if (NodeIsPrimary(newWorkerNode))
+	List *distributedTableList = CitusTableList();
+	List *propagatedTableList = NIL;
+	List *multipleTableIntegrationCommandList = NIL;
+
+	CitusTableCacheEntry *cacheEntry = NULL;
+	foreach_ptr(cacheEntry, distributedTableList)
 	{
-		EnsureNoModificationsHaveBeenDone();
-
-		if (ShouldPropagate() && !NodeIsCoordinator(newWorkerNode))
+		/*
+		 * Skip foreign key and partition creation when we don't need to sync
+		 * table metadata or the Citus table is owned by an extension.
+		 */
+		if (ShouldSyncTableMetadata(cacheEntry->relationId) &&
+			!IsTableOwnedByExtension(cacheEntry->relationId))
 		{
-			PropagateNodeWideObjects(newWorkerNode);
-			ReplicateAllDependenciesToNode(newWorkerNode->workerName,
-										   newWorkerNode->workerPort);
-		}
-		else if (!NodeIsCoordinator(newWorkerNode))
-		{
-			ereport(WARNING, (errmsg("citus.enable_object_propagation is off, not "
-									 "creating distributed objects on worker"),
-							  errdetail("distributed objects are only kept in sync when "
-										"citus.enable_object_propagation is set to on. "
-										"Newly activated nodes will not get these "
-										"objects created")));
-		}
-
-		if (ReplicateReferenceTablesOnActivate)
-		{
-			ReplicateAllReferenceTablesToNode(newWorkerNode->workerName,
-											  newWorkerNode->workerPort);
+			propagatedTableList = lappend(propagatedTableList, cacheEntry);
 		}
 	}
+
+	foreach_ptr(cacheEntry, propagatedTableList)
+	{
+		Oid relationId = cacheEntry->relationId;
+
+		List *commandListForRelation =
+			InterTableRelationshipOfRelationCommandList(relationId);
+
+		multipleTableIntegrationCommandList = list_concat(
+			multipleTableIntegrationCommandList,
+			commandListForRelation);
+	}
+
+	multipleTableIntegrationCommandList = lcons(DISABLE_DDL_PROPAGATION,
+												multipleTableIntegrationCommandList);
+	multipleTableIntegrationCommandList = lappend(multipleTableIntegrationCommandList,
+												  ENABLE_DDL_PROPAGATION);
+
+	return multipleTableIntegrationCommandList;
 }
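
On a metadata worker, the command list assembled here boils down to a stream like the following sketch. The table names are illustrative, the exact DDL Citus deparses may differ, and DISABLE_DDL_PROPAGATION/ENABLE_DDL_PROPAGATION are assumed to expand to the usual citus.enable_ddl_propagation settings:

    SET citus.enable_ddl_propagation TO 'off';
    -- foreign keys between shell tables
    ALTER TABLE public.orders ADD CONSTRAINT orders_customer_fkey
        FOREIGN KEY (customer_id) REFERENCES public.customers (id);
    -- partitioning hierarchy between shell tables
    ALTER TABLE public.events ATTACH PARTITION public.events_2022
        FOR VALUES FROM ('2022-01-01') TO ('2023-01-01');
    SET citus.enable_ddl_propagation TO 'on';
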
 
 
 /*
- * PropagateNodeWideObjects is called during node activation to propagate any object that
- * should be propagated for every node. These are generally not linked to any distributed
- * object but change system wide behaviour.
+ * PgDistTableMetadataSyncCommandList returns the command list to sync the pg_dist_*
+ * (except pg_dist_node) metadata. We refer to these as table metadata.
 */
-static void
-PropagateNodeWideObjects(WorkerNode *newWorkerNode)
+List *
+PgDistTableMetadataSyncCommandList(void)
+{
+	List *distributedTableList = CitusTableList();
+	List *propagatedTableList = NIL;
+	List *metadataSnapshotCommandList = NIL;
+
+	/* create the list of tables whose metadata will be created */
+	CitusTableCacheEntry *cacheEntry = NULL;
+	foreach_ptr(cacheEntry, distributedTableList)
+	{
+		if (ShouldSyncTableMetadata(cacheEntry->relationId))
+		{
+			propagatedTableList = lappend(propagatedTableList, cacheEntry);
+		}
+	}
+
+	/* remove all dist table and object related metadata first */
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+										  DELETE_ALL_PARTITIONS);
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, DELETE_ALL_SHARDS);
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+										  DELETE_ALL_PLACEMENTS);
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+										  DELETE_ALL_DISTRIBUTED_OBJECTS);
+
+	/* create pg_dist_partition, pg_dist_shard and pg_dist_placement entries */
+	foreach_ptr(cacheEntry, propagatedTableList)
+	{
+		List *tableMetadataCreateCommandList =
+			CitusTableMetadataCreateCommandList(cacheEntry->relationId);
+
+		metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
+												  tableMetadataCreateCommandList);
+	}
+
+	/* As the last step, propagate the pg_dist_object entities */
+	Assert(ShouldPropagate());
+	List *distributedObjectSyncCommandList = DistributedObjectMetadataSyncCommandList();
+	metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList,
+											  distributedObjectSyncCommandList);
+
+	metadataSnapshotCommandList = lcons(DISABLE_DDL_PROPAGATION,
+										metadataSnapshotCommandList);
+	metadataSnapshotCommandList = lappend(metadataSnapshotCommandList,
+										  ENABLE_DDL_PROPAGATION);
+
+	return metadataSnapshotCommandList;
+}
+
+
+/*
+ * PropagateNodeWideObjectsCommandList is called during node activation to
+ * propagate any object that should be propagated for every node. These are
+ * generally not linked to any distributed object but change system wide behaviour.
+ */
+static List *
+PropagateNodeWideObjectsCommandList()
 {
 	/* collect all commands */
 	List *ddlCommands = NIL;
@@ -640,13 +708,142 @@ PropagateNodeWideObjects(WorkerNode *newWorkerNode)
 	/* if there are command wrap them in enable_ddl_propagation off */
 	ddlCommands = lcons(DISABLE_DDL_PROPAGATION, ddlCommands);
 	ddlCommands = lappend(ddlCommands, ENABLE_DDL_PROPAGATION);
+	}
+
+	return ddlCommands;
+}
+
+
+/*
+ * SyncDistributedObjectsCommandList returns commands to sync object dependencies
+ * to the given worker node. To make the operation idempotent, it first drops the
+ * objects that need to be recreated.
+ *
+ * Object dependencies include:
+ *
+ * - All dependencies (e.g., types, schemas, sequences)
+ * - All shell distributed tables
+ * - Inter-table relationships between those shell tables
+ * - Node wide objects
+ *
+ * We also update the local group id here, as handling sequence dependencies
+ * requires it.
+ */
+List *
+SyncDistributedObjectsCommandList(WorkerNode *workerNode)
+{
+	List *commandList = NIL;
+
+	/*
+	 * Propagate node wide objects. It includes only roles for now.
+	 */
+	commandList = list_concat(commandList, PropagateNodeWideObjectsCommandList());
+
+	/*
+	 * Detach partitions and break dependencies between sequences and tables,
+	 * then remove the shell tables first.
+	 */
+	commandList = list_concat(commandList, DetachPartitionCommandList());
+	commandList = lappend(commandList, BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND);
+	commandList = lappend(commandList, REMOVE_ALL_SHELL_TABLES_COMMAND);
+
+	/*
+	 * Replicate all objects in pg_dist_object to the remote node.
+	 */
+	commandList = list_concat(commandList, ReplicateAllObjectsToNodeCommandList(
+								  workerNode->workerName, workerNode->workerPort));
+
+	/*
+	 * After creating each table, handle the inter-table relationships between
+	 * those tables.
+	 */
+	commandList = list_concat(commandList, InterTableRelationshipCommandList());
+
+	return commandList;
+}
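
Before recreating anything, the command list starts with the idempotent drop prologue assembled above. A sketch of what the remote node executes follows; REMOVE_ALL_SHELL_TABLES_COMMAND is the literal constant defined in metadata_sync.h, while the expansion shown for BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND and the DETACH statement are assumptions for illustration:

    -- one per existing partition, generated by DetachPartitionCommandList()
    ALTER TABLE public.events DETACH PARTITION public.events_2022;
    -- break sequence/table dependencies, assumed to expand to:
    SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition;
    -- drop all shell tables while keeping the pg_dist_* metadata
    SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition;
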
+
+
+/*
+ * SyncDistributedObjectsToNode syncs the distributed objects to the node. It includes
+ * - All dependencies (e.g., types, schemas, sequences)
+ * - All shell distributed tables
+ * - Inter-table relationships between those shell tables
+ *
+ * Note that we do not create the distributed dependencies on the coordinator
+ * since all the dependencies should be present in the coordinator already.
+ */
+static void
+SyncDistributedObjectsToNode(WorkerNode *workerNode)
+{
+	if (NodeIsCoordinator(workerNode))
+	{
+		/* coordinator has all the objects */
+		return;
+	}
+
+	if (!NodeIsPrimary(workerNode))
+	{
+		/* secondary nodes get the objects from their primaries via replication */
+		return;
+	}
+
+	EnsureNoModificationsHaveBeenDone();
+	EnsureSequentialModeMetadataOperations();
+
+	Assert(ShouldPropagate());
+
+	List *commandList = SyncDistributedObjectsCommandList(workerNode);
+
+	/* send commands to new workers, the current user should be a superuser */
+	Assert(superuser());
+	SendMetadataCommandListToWorkerInCoordinatedTransaction(
+		workerNode->workerName,
+		workerNode->workerPort,
+		CurrentUserName(),
+		commandList);
+}
+
+
+/*
+ * UpdateLocalGroupIdOnNode updates the local group id on the node.
+ */
+static void
+UpdateLocalGroupIdOnNode(WorkerNode *workerNode)
+{
+	if (NodeIsPrimary(workerNode) && !NodeIsCoordinator(workerNode))
+	{
+		List *commandList = list_make1(LocalGroupIdUpdateCommand(workerNode->groupId));
 
 		/* send commands to new workers, the current user should be a superuser */
 		Assert(superuser());
-		SendMetadataCommandListToWorkerInCoordinatedTransaction(newWorkerNode->workerName,
-																newWorkerNode->workerPort,
-																CurrentUserName(),
-																ddlCommands);
+		SendMetadataCommandListToWorkerInCoordinatedTransaction(
+			workerNode->workerName,
+			workerNode->workerPort,
+			CurrentUserName(),
+			commandList);
+	}
+}
+
+
+/*
+ * SyncPgDistTableMetadataToNode syncs the pg_dist_partition, pg_dist_shard,
+ * pg_dist_placement and pg_dist_object metadata entries.
+ */
+static void
+SyncPgDistTableMetadataToNode(WorkerNode *workerNode)
+{
+	if (NodeIsPrimary(workerNode) && !NodeIsCoordinator(workerNode))
+	{
+		List *syncPgDistMetadataCommandList = PgDistTableMetadataSyncCommandList();
+
+		/* send commands to new workers, the current user should be a superuser */
+		Assert(superuser());
+		SendMetadataCommandListToWorkerInCoordinatedTransaction(
+			workerNode->workerName,
+			workerNode->workerPort,
+			CurrentUserName(),
+			syncPgDistMetadataCommandList);
 	}
 }
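
Spelled out in SQL, the snapshot sent by SyncPgDistTableMetadataToNode first clears the table-related catalogs with the DELETE_* command constants and then repopulates them. The repopulating calls are abbreviated here and the exact argument lists are omitted:

    SET citus.enable_ddl_propagation TO 'off';
    DELETE FROM pg_dist_partition;
    DELETE FROM pg_dist_shard;
    DELETE FROM pg_dist_placement;
    DELETE FROM citus.pg_dist_object;
    -- followed, per synced table, by citus_internal_add_partition_metadata(),
    -- citus_internal_add_shard_metadata() and citus_internal_add_placement_metadata(),
    -- and finally citus_internal_add_object_metadata() for the pg_dist_object entries
    SET citus.enable_ddl_propagation TO 'on';
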
@@ -847,7 +1044,7 @@ PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes)
 * includes only replicating the reference tables and setting isactive column of the
 * given node.
 */
-static int
+int
 ActivateNode(char *nodeName, int nodePort)
 {
 	bool isActive = true;
@@ -897,10 +1094,11 @@ ActivateNode(char *nodeName, int nodePort)
 	workerNode = SetWorkerColumnLocalOnly(workerNode, Anum_pg_dist_node_isactive,
 										  BoolGetDatum(isActive));
 
-	bool syncMetadata =
-		EnableMetadataSyncByDefault && NodeIsPrimary(workerNode);
 
-	if (syncMetadata)
+	/* TODO: Once all tests are enabled for MX, we can remove the sync-by-default check */
+	bool syncMetadata = EnableMetadataSyncByDefault && NodeIsPrimary(workerNode);
+
+	if (syncMetadata && EnableDependencyCreation)
 	{
 		/*
 		 * We are going to sync the metadata anyway in this transaction, so do
@@ -908,13 +1106,43 @@ ActivateNode(char *nodeName, int nodePort)
 		 */
 		SetWorkerColumn(workerNode, Anum_pg_dist_node_metadatasynced,
 						BoolGetDatum(true));
-	}
 
-	SetUpDistributedTableDependencies(workerNode);
+		/*
+		 * Update the local group id first, as the object dependency logic
+		 * requires it to be up to date.
+		 */
+		UpdateLocalGroupIdOnNode(workerNode);
 
-	if (syncMetadata)
-	{
-		StartMetadataSyncToNode(nodeName, nodePort);
+		/*
+		 * Sync distributed objects first. We must sync distributed objects before
+		 * replicating reference tables to the remote node, as reference tables may
+		 * need such objects.
+		 */
+		SyncDistributedObjectsToNode(workerNode);
+
+		/*
+		 * We need to replicate reference tables before syncing node metadata,
+		 * otherwise the reference table replication logic would try to get a lock
+		 * on the new node before the shard placement exists on it.
+		 */
+		if (ReplicateReferenceTablesOnActivate)
+		{
+			ReplicateAllReferenceTablesToNode(workerNode);
+		}
+
+		/*
+		 * Sync node metadata. We must sync node metadata before syncing the
+		 * table-related pg_dist_xxx metadata, since the table-related metadata
+		 * requires the right pg_dist_node entries to be in place.
+		 */
+		SyncNodeMetadataToNode(nodeName, nodePort);
+
+		/*
+		 * As the last step, sync the table-related metadata to the remote node.
+		 * We must handle it last because of the ordering constraints described
+		 * in the comments above.
+		 */
+		SyncPgDistTableMetadataToNode(workerNode);
 	}
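
Condensed into the order a newly activated primary worker observes, the sequence is fixed; the sketch below uses an illustrative group id, and the UPDATE is the same statement the failure tests intercept:

    -- 1. update the local group id so dependency and sequence logic resolve correctly
    UPDATE pg_dist_local_group SET groupid = 14;
    -- 2. distributed objects and shell tables (SyncDistributedObjectsToNode)
    -- 3. reference table shard placements, if citus.replicate_reference_tables_on_activate is on
    -- 4. pg_dist_node entries (SyncNodeMetadataToNode)
    -- 5. pg_dist_partition, pg_dist_shard, pg_dist_placement and pg_dist_object
    --    (SyncPgDistTableMetadataToNode)
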
 
 	/* finally, let all other active metadata nodes to learn about this change */
diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c
index 4f22f977c..f2d66fb59 100644
--- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c
+++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c
@@ -471,6 +471,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr,
 			break;
 		}
 
+		case OBJECT_FOREIGN_TABLE:
 		case OBJECT_TABLE:
 		{
 			/* table distribution already does the ownership check, so we can stick to that over acl_check */
diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c
index da9314143..0cb0a5ace 100644
--- a/src/backend/distributed/operations/node_protocol.c
+++ b/src/backend/distributed/operations/node_protocol.c
@@ -141,8 +141,10 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
 											functionContext->multi_call_memory_ctx);
 
 		/* allocate DDL statements, and then save position in DDL statements */
+		bool creatingShellTableOnRemoteNode = false;
 		List *tableDDLEventList = GetFullTableCreationCommands(relationId,
-																includeSequenceDefaults);
+																includeSequenceDefaults,
+																creatingShellTableOnRemoteNode);
 		tableDDLEventCell = list_head(tableDDLEventList);
 		ListCellAndListWrapper *wrapper = palloc0(sizeof(ListCellAndListWrapper));
 		wrapper->list = tableDDLEventList;
@@ -458,8 +460,9 @@ ResolveRelationId(text *relationName, bool missingOk)
 * constraint and trigger definitions.
 */
List *
-GetFullTableCreationCommands(Oid relationId, IncludeSequenceDefaults
-							 includeSequenceDefaults)
+GetFullTableCreationCommands(Oid relationId,
+							 IncludeSequenceDefaults includeSequenceDefaults,
+							 bool creatingShellTableOnRemoteNode)
{
	List *tableDDLEventList = NIL;
 
@@ -471,6 +474,25 @@ GetFullTableCreationCommands(Oid relationId, IncludeSequenceDefaults
	List *postLoadCreationCommandList =
		GetPostLoadTableCreationCommands(relationId, true, true);
 
+	if (creatingShellTableOnRemoteNode)
+	{
+		/*
+		 * While creating shell tables, we need to associate dependencies between
+		 * sequences and the relation. We also need to add a truncate trigger for
+		 * the table if it is not a foreign table.
+ */ + List *sequenceDependencyCommandList = SequenceDependencyCommandList(relationId); + tableDDLEventList = list_concat(tableDDLEventList, sequenceDependencyCommandList); + + if (!IsForeignTable(relationId)) + { + TableDDLCommand *truncateTriggerCommand = TruncateTriggerCreateCommand( + relationId); + tableDDLEventList = lappend(tableDDLEventList, + truncateTriggerCommand); + } + } + tableDDLEventList = list_concat(tableDDLEventList, postLoadCreationCommandList); return tableDDLEventList; diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index 046fedf19..c473e7974 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -325,8 +325,10 @@ CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, List *foreignConstraintCommandList = GetReferencingForeignConstaintCommands(relationId); IncludeSequenceDefaults includeSequenceDefaults = NO_SEQUENCE_DEFAULTS; + bool creatingShellTableOnRemoteNode = false; List *ddlCommandList = GetFullTableCreationCommands(relationId, - includeSequenceDefaults); + includeSequenceDefaults, + creatingShellTableOnRemoteNode); uint32 connectionFlag = FOR_DDL; char *relationOwner = TableOwner(relationId); @@ -438,8 +440,10 @@ CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements, bool useExclusiveConnection, bool colocatedShard) { IncludeSequenceDefaults includeSequenceDefaults = NO_SEQUENCE_DEFAULTS; + bool creatingShellTableOnRemoteNode = false; List *ddlCommandList = GetFullTableCreationCommands(distributedRelationId, - includeSequenceDefaults); + includeSequenceDefaults, + creatingShellTableOnRemoteNode); List *foreignConstraintCommandList = GetReferencingForeignConstaintCommands(distributedRelationId); diff --git a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql index 7f437570e..9c23b31c2 100644 --- a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql +++ b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql @@ -12,6 +12,7 @@ #include "udfs/citus_internal_add_object_metadata/11.0-1.sql" #include "udfs/citus_run_local_command/11.0-1.sql" #include "udfs/worker_drop_sequence_dependency/11.0-1.sql" +#include "udfs/worker_drop_shell_table/11.0-1.sql" DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text); diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql index cc8a6fcd1..6fbe07ea3 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql @@ -47,6 +47,7 @@ DROP FUNCTION pg_catalog.citus_check_cluster_node_health (); DROP FUNCTION pg_catalog.citus_internal_add_object_metadata(text, text[], text[], integer, integer, boolean); DROP FUNCTION pg_catalog.citus_run_local_command(text); DROP FUNCTION pg_catalog.worker_drop_sequence_dependency(text); +DROP FUNCTION pg_catalog.worker_drop_shell_table(table_name text); CREATE OR REPLACE VIEW pg_catalog.citus_shards_on_worker AS SELECT n.nspname as "Schema", diff --git a/src/backend/distributed/sql/udfs/worker_drop_shell_table/11.0-1.sql b/src/backend/distributed/sql/udfs/worker_drop_shell_table/11.0-1.sql new file mode 100644 index 000000000..92a99ccf4 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_drop_shell_table/11.0-1.sql @@ -0,0 +1,7 @@ +CREATE FUNCTION 
pg_catalog.worker_drop_shell_table(table_name text)
+    RETURNS VOID
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$worker_drop_shell_table$$;
+
+COMMENT ON FUNCTION worker_drop_shell_table(table_name text)
+    IS 'drop the distributed table only without the metadata';
diff --git a/src/backend/distributed/sql/udfs/worker_drop_shell_table/latest.sql b/src/backend/distributed/sql/udfs/worker_drop_shell_table/latest.sql
new file mode 100644
index 000000000..92a99ccf4
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/worker_drop_shell_table/latest.sql
@@ -0,0 +1,7 @@
+CREATE FUNCTION pg_catalog.worker_drop_shell_table(table_name text)
+    RETURNS VOID
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$worker_drop_shell_table$$;
+
+COMMENT ON FUNCTION worker_drop_shell_table(table_name text)
+    IS 'drop the distributed table only without the metadata';
diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c
index 0c8622a83..ca00e7d6a 100644
--- a/src/backend/distributed/test/metadata_sync.c
+++ b/src/backend/distributed/test/metadata_sync.c
@@ -20,6 +20,7 @@
 #include "distributed/maintenanced.h"
 #include "distributed/metadata_sync.h"
 #include "distributed/remote_commands.h"
+#include "distributed/worker_manager.h"
 #include "postmaster/postmaster.h"
 #include "miscadmin.h"
 #include "storage/latch.h"
@@ -28,45 +29,66 @@
 
 
 /* declarations for dynamic loading */
-PG_FUNCTION_INFO_V1(master_metadata_snapshot);
+PG_FUNCTION_INFO_V1(activate_node_snapshot);
 PG_FUNCTION_INFO_V1(wait_until_metadata_sync);
 PG_FUNCTION_INFO_V1(trigger_metadata_sync);
 PG_FUNCTION_INFO_V1(raise_error_in_metadata_sync);
 
 
 /*
- * master_metadata_snapshot prints all the queries that are required
- * to generate a metadata snapshot.
+ * activate_node_snapshot prints all the queries that are required
+ * to activate a node.
 */
 Datum
-master_metadata_snapshot(PG_FUNCTION_ARGS)
+activate_node_snapshot(PG_FUNCTION_ARGS)
 {
-	List *dropSnapshotCommands = MetadataDropCommands();
-	List *createSnapshotCommands = MetadataCreateCommands();
-	List *snapshotCommandList = NIL;
-	int snapshotCommandIndex = 0;
+	/*
+	 * Activate node commands are created using the given worker node,
+	 * so we use the first primary worker node just for test purposes.
+ */ + WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode(); + + List *updateLocalGroupCommand = + list_make1(LocalGroupIdUpdateCommand(dummyWorkerNode->groupId)); + List *syncDistObjCommands = SyncDistributedObjectsCommandList(dummyWorkerNode); + List *dropSnapshotCommands = NodeMetadataDropCommands(); + List *createSnapshotCommands = NodeMetadataCreateCommands(); + List *pgDistTableMetadataSyncCommands = PgDistTableMetadataSyncCommandList(); + + List *activateNodeCommandList = NIL; + int activateNodeCommandIndex = 0; Oid ddlCommandTypeId = TEXTOID; - snapshotCommandList = list_concat(snapshotCommandList, dropSnapshotCommands); - snapshotCommandList = list_concat(snapshotCommandList, createSnapshotCommands); + activateNodeCommandList = list_concat(activateNodeCommandList, + updateLocalGroupCommand); + activateNodeCommandList = list_concat(activateNodeCommandList, syncDistObjCommands); + activateNodeCommandList = list_concat(activateNodeCommandList, dropSnapshotCommands); + activateNodeCommandList = list_concat(activateNodeCommandList, + createSnapshotCommands); + activateNodeCommandList = list_concat(activateNodeCommandList, + pgDistTableMetadataSyncCommands); - int snapshotCommandCount = list_length(snapshotCommandList); - Datum *snapshotCommandDatumArray = palloc0(snapshotCommandCount * sizeof(Datum)); + int activateNodeCommandCount = list_length(activateNodeCommandList); + Datum *activateNodeCommandDatumArray = palloc0(activateNodeCommandCount * + sizeof(Datum)); - const char *metadataSnapshotCommand = NULL; - foreach_ptr(metadataSnapshotCommand, snapshotCommandList) + const char *activateNodeSnapshotCommand = NULL; + foreach_ptr(activateNodeSnapshotCommand, activateNodeCommandList) { - Datum metadataSnapshotCommandDatum = CStringGetTextDatum(metadataSnapshotCommand); + Datum activateNodeSnapshotCommandDatum = CStringGetTextDatum( + activateNodeSnapshotCommand); - snapshotCommandDatumArray[snapshotCommandIndex] = metadataSnapshotCommandDatum; - snapshotCommandIndex++; + activateNodeCommandDatumArray[activateNodeCommandIndex] = + activateNodeSnapshotCommandDatum; + activateNodeCommandIndex++; } - ArrayType *snapshotCommandArrayType = DatumArrayToArrayType(snapshotCommandDatumArray, - snapshotCommandCount, - ddlCommandTypeId); + ArrayType *activateNodeCommandArrayType = DatumArrayToArrayType( + activateNodeCommandDatumArray, + activateNodeCommandCount, + ddlCommandTypeId); - PG_RETURN_ARRAYTYPE_P(snapshotCommandArrayType); + PG_RETURN_ARRAYTYPE_P(activateNodeCommandArrayType); } diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 2295830d6..940764235 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -524,7 +524,7 @@ CitusMaintenanceDaemonMain(Datum main_arg) if (syncMetadata) { metadataSyncBgwHandle = - SpawnSyncMetadataToNodes(MyDatabaseId, myDbData->userOid); + SpawnSyncNodeMetadataToNodes(MyDatabaseId, myDbData->userOid); } nextMetadataSyncTime = diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c index f2a170bc4..70f56119c 100644 --- a/src/backend/distributed/utils/reference_table_utils.c +++ b/src/backend/distributed/utils/reference_table_utils.c @@ -537,7 +537,7 @@ ReferenceTableReplicationFactor(void) * reference table to prevent unnecessary data transfer. 
*/ void -ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort) +ReplicateAllReferenceTablesToNode(WorkerNode *workerNode) { List *referenceTableList = CitusTableTypeIdList(REFERENCE_TABLE); @@ -560,7 +560,9 @@ ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort) List *shardPlacementList = ShardPlacementListIncludingOrphanedPlacements(shardInterval->shardId); ShardPlacement *targetPlacement = - SearchShardPlacementInList(shardPlacementList, nodeName, nodePort); + SearchShardPlacementInList(shardPlacementList, + workerNode->workerName, + workerNode->workerPort); if (targetPlacement != NULL && targetPlacement->shardState == SHARD_STATE_ACTIVE) { @@ -585,7 +587,9 @@ ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort) LockShardDistributionMetadata(shardId, ExclusiveLock); - ReplicateReferenceTableShardToNode(shardInterval, nodeName, nodePort); + ReplicateReferenceTableShardToNode(shardInterval, + workerNode->workerName, + workerNode->workerPort); } /* create foreign constraints between reference tables */ @@ -595,9 +599,11 @@ ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort) /* send commands to new workers, the current user should be a superuser */ Assert(superuser()); - SendMetadataCommandListToWorkerInCoordinatedTransaction(nodeName, nodePort, - CurrentUserName(), - commandList); + SendMetadataCommandListToWorkerInCoordinatedTransaction( + workerNode->workerName, + workerNode->workerPort, + CurrentUserName(), + commandList); } } } diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index c067abc11..942cabba5 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -33,8 +33,6 @@ #include "distributed/worker_protocol.h" static const char * CreateStmtByObjectAddress(const ObjectAddress *address); -static RenameStmt * CreateRenameStatement(const ObjectAddress *address, char *newName); -static char * GenerateBackupNameForCollision(const ObjectAddress *address); PG_FUNCTION_INFO_V1(worker_create_or_replace_object); @@ -166,7 +164,7 @@ CreateStmtByObjectAddress(const ObjectAddress *address) * address. This name should be used when renaming an existing object before creating the * new object locally on the worker. */ -static char * +char * GenerateBackupNameForCollision(const ObjectAddress *address) { switch (getObjectClass(address)) @@ -186,13 +184,23 @@ GenerateBackupNameForCollision(const ObjectAddress *address) return GenerateBackupNameForTypeCollision(address); } + case OCLASS_CLASS: + { + char relKind = get_rel_relkind(address->objectId); + if (relKind == RELKIND_SEQUENCE) + { + return GenerateBackupNameForSequenceCollision(address); + } + } + default: { - ereport(ERROR, (errmsg("unsupported object to construct a rename statement"), - errdetail( - "unable to generate a backup name for the old type"))); + break; } } + + ereport(ERROR, (errmsg("unsupported object to construct a rename statement"), + errdetail("unable to generate a backup name for the old type"))); } @@ -243,6 +251,7 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName) address->objectId)); stmt->newname = newName; + return stmt; } @@ -265,11 +274,43 @@ CreateRenameProcStmt(const ObjectAddress *address, char *newName) } +/* + * CreateRenameSequenceStmt creates a rename statement for a sequence based on its + * ObjectAddress. 
The rename statement will rename the existing object on its address
+ * to the value provided in newName.
+ */
+static RenameStmt *
+CreateRenameSequenceStmt(const ObjectAddress *address, char *newName)
+{
+	RenameStmt *stmt = makeNode(RenameStmt);
+	Oid seqOid = address->objectId;
+
+	HeapTuple seqClassTuple = SearchSysCache1(RELOID, seqOid);
+	if (!HeapTupleIsValid(seqClassTuple))
+	{
+		ereport(ERROR, (errmsg("citus cache lookup error")));
+	}
+	Form_pg_class seqClassForm = (Form_pg_class) GETSTRUCT(seqClassTuple);
+
+	char *schemaName = get_namespace_name(seqClassForm->relnamespace);
+	char *seqName = NameStr(seqClassForm->relname);
+	List *name = list_make2(makeString(schemaName), makeString(seqName));
+	ReleaseSysCache(seqClassTuple);
+
+	stmt->renameType = OBJECT_SEQUENCE;
+	stmt->object = (Node *) name;
+	stmt->relation = makeRangeVar(schemaName, seqName, -1);
+	stmt->newname = newName;
+
+	return stmt;
+}
+
+
/*
 * CreateRenameStatement creates a rename statement for an existing object to rename the
 * object to newName.
 */
-static RenameStmt *
+RenameStmt *
CreateRenameStatement(const ObjectAddress *address, char *newName)
{
	switch (getObjectClass(address))
@@ -289,10 +330,21 @@ CreateRenameStatement(const ObjectAddress *address, char *newName)
			return CreateRenameTypeStmt(address, newName);
		}
 
+		case OCLASS_CLASS:
+		{
+			char relKind = get_rel_relkind(address->objectId);
+			if (relKind == RELKIND_SEQUENCE)
+			{
+				return CreateRenameSequenceStmt(address, newName);
+			}
+		}
+
		default:
		{
-			ereport(ERROR, (errmsg("unsupported object to construct a rename statement"),
-							errdetail("unable to generate a parsetree for the rename")));
+			break;
		}
	}
+
+	ereport(ERROR, (errmsg("unsupported object to construct a rename statement"),
+					errdetail("unable to generate a parsetree for the rename")));
}
diff --git a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c
index efdc2da85..8b55d5f93 100644
--- a/src/backend/distributed/worker/worker_data_fetch_protocol.c
+++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c
@@ -28,13 +28,16 @@
 #include "commands/extension.h"
 #include "commands/sequence.h"
 #include "distributed/citus_ruleutils.h"
+#include "distributed/commands.h"
 #include "distributed/commands/multi_copy.h"
 #include "distributed/commands/utility_hook.h"
 #include "distributed/connection_management.h"
 #include "distributed/coordinator_protocol.h"
+#include "distributed/deparser.h"
 #include "distributed/intermediate_results.h"
 #include "distributed/listutils.h"
 #include "distributed/metadata_cache.h"
+#include "distributed/metadata_sync.h"
 #include "distributed/multi_client_executor.h"
 #include "distributed/multi_logical_optimizer.h"
 #include "distributed/multi_partitioning_utils.h"
@@ -43,8 +46,10 @@
 #include "distributed/remote_commands.h"
 #include "distributed/resource_lock.h"
 
+#include "distributed/worker_create_or_replace.h"
 #include "distributed/worker_protocol.h"
 #include "distributed/version_compat.h"
+#include "executor/spi.h"
 #include "nodes/makefuncs.h"
 #include "parser/parse_relation.h"
 #include "storage/lmgr.h"
@@ -461,19 +466,25 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
						   " SEQUENCE command string")));
	}
 
+	/*
+	 * If a sequence with the same name exists for a different type, it must have
+	 * stayed on that node after a rolled-back create_distributed_table operation.
+	 * We must change its name first to create the sequence with the correct type.
+	 */
+	CreateSeqStmt *createSequenceStatement = (CreateSeqStmt *) commandNode;
+	RenameExistingSequenceWithDifferentTypeIfExists(createSequenceStatement->sequence,
+													sequenceTypeId);
+
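
In SQL terms, the effect on a worker that still holds a leftover sequence of the wrong type is roughly the following. The sequence name is illustrative and the backup name shown is only a plausible shape of what GenerateBackupNameForSequenceCollision produces:

    ALTER SEQUENCE public.user_defined_seq RENAME TO user_defined_seq_backup_0;
    CREATE SEQUENCE public.user_defined_seq AS int;
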
 	/* run the CREATE SEQUENCE command */
 	ProcessUtilityParseTree(commandNode, commandString, PROCESS_UTILITY_QUERY, NULL,
 							None_Receiver, NULL);
 	CommandCounterIncrement();
 
-	CreateSeqStmt *createSequenceStatement = (CreateSeqStmt *) commandNode;
-
-	char *sequenceName = createSequenceStatement->sequence->relname;
-	char *sequenceSchema = createSequenceStatement->sequence->schemaname;
-	createSequenceStatement = (CreateSeqStmt *) commandNode;
-
 	Oid sequenceRelationId = RangeVarGetRelid(createSequenceStatement->sequence,
 											  AccessShareLock, false);
+
+	char *sequenceName = createSequenceStatement->sequence->relname;
+	char *sequenceSchema = createSequenceStatement->sequence->schemaname;
+
 	Assert(sequenceRelationId != InvalidOid);
 
 	AlterSequenceMinMax(sequenceRelationId, sequenceSchema, sequenceName,
 						sequenceTypeId);
diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c
index 1c40838c6..f2ae3597f 100644
--- a/src/backend/distributed/worker/worker_drop_protocol.c
+++ b/src/backend/distributed/worker/worker_drop_protocol.c
@@ -34,6 +34,7 @@
 #include "utils/fmgroids.h"
 
 PG_FUNCTION_INFO_V1(worker_drop_distributed_table);
+PG_FUNCTION_INFO_V1(worker_drop_shell_table);
 PG_FUNCTION_INFO_V1(worker_drop_sequence_dependency);
 
 
@@ -104,6 +105,8 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS)
 		UnmarkObjectDistributed(&ownedSequenceAddress);
 	}
 
+	UnmarkObjectDistributed(&distributedTableObject);
+
 	if (!IsObjectAddressOwnedByExtension(&distributedTableObject, NULL))
 	{
 		/*
@@ -142,6 +145,82 @@ worker_drop_distributed_table(PG_FUNCTION_ARGS)
 }
 
 
+/*
+ * worker_drop_shell_table drops the shell table of the given distributed
+ * table without deleting related entries in pg_dist_partition, pg_dist_shard
+ * and pg_dist_placement. We've separated that logic since we handle object
+ * dependencies and table metadata separately while activating nodes.
+ */
+Datum
+worker_drop_shell_table(PG_FUNCTION_ARGS)
+{
+	CheckCitusVersion(ERROR);
+
+	text *relationName = PG_GETARG_TEXT_P(0);
+	Oid relationId = ResolveRelationId(relationName, true);
+
+	if (!OidIsValid(relationId))
+	{
+		ereport(NOTICE, (errmsg("relation %s does not exist, skipping",
+								text_to_cstring(relationName))));
+		PG_RETURN_VOID();
+	}
+
+	EnsureTableOwner(relationId);
+
+	if (GetLocalGroupId() == COORDINATOR_GROUP_ID)
+	{
+		ereport(ERROR, (errmsg("worker_drop_shell_table is only allowed to run"
+							   " on worker nodes")));
+	}
+
+	/* first check the relation type */
+	Relation distributedRelation = relation_open(relationId, AccessShareLock);
+	EnsureRelationKindSupported(relationId);
+
+	/* close the relation since we do not need it anymore */
+	relation_close(distributedRelation, AccessShareLock);
+
+	/* prepare distributedTableObject for dropping the table */
+	ObjectAddress distributedTableObject = { InvalidOid, InvalidOid, 0 };
+	distributedTableObject.classId = RelationRelationId;
+	distributedTableObject.objectId = relationId;
+	distributedTableObject.objectSubId = 0;
+
+	if (IsObjectAddressOwnedByExtension(&distributedTableObject, NULL))
+	{
+		PG_RETURN_VOID();
+	}
+
+	/* Drop dependent sequences from pg_dist_object */
+	#if PG_VERSION_NUM >= PG_VERSION_13
+	List *ownedSequences = getOwnedSequences(relationId);
+	#else
+	List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber);
+	#endif
+
+	Oid ownedSequenceOid = InvalidOid;
+	foreach_oid(ownedSequenceOid, ownedSequences)
+	{
+		ObjectAddress ownedSequenceAddress = { 0 };
+		ObjectAddressSet(ownedSequenceAddress, RelationRelationId, ownedSequenceOid);
+		UnmarkObjectDistributed(&ownedSequenceAddress);
+	}
+
+	/*
+	 * Extension-owned tables were already skipped above, so at this point we
+	 * can safely drop the shell table itself. We drop it with cascade since
+	 * other tables may be referring to it.
+	 */
+	performDeletion(&distributedTableObject, DROP_CASCADE,
+					PERFORM_DELETION_INTERNAL);
+
+	PG_RETURN_VOID();
+}
+
+
 /*
 * worker_drop_sequence_dependency is a UDF that removes the dependency
 * of all the sequences for the given table.
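
For reference, the new UDF can be exercised directly on a worker; the second form below is exactly the REMOVE_ALL_SHELL_TABLES_COMMAND that node activation uses, while the table name in the first form is illustrative:

    -- drop a single shell table, keeping its pg_dist_* entries
    SELECT worker_drop_shell_table('public.dist_table');
    -- drop every shell table on the node
    SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition;
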
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 8d04ae4c4..41141ee8a 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -397,6 +397,9 @@ extern ObjectAddress AlterSequenceOwnerStmtObjectAddress(Node *node, bool missin extern ObjectAddress RenameSequenceStmtObjectAddress(Node *node, bool missing_ok); extern void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt); extern void ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt); +extern char * GenerateBackupNameForSequenceCollision(const ObjectAddress *address); +extern void RenameExistingSequenceWithDifferentTypeIfExists(RangeVar *sequence, + Oid desiredSeqTypeId); /* statistics.c - forward declarations */ extern List * PreprocessCreateStatisticsStmt(Node *node, const char *queryString, diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h index cceb12c40..f0bda8515 100644 --- a/src/include/distributed/coordinator_protocol.h +++ b/src/include/distributed/coordinator_protocol.h @@ -224,7 +224,8 @@ extern uint64 GetNextShardId(void); extern uint64 GetNextPlacementId(void); extern Oid ResolveRelationId(text *relationName, bool missingOk); extern List * GetFullTableCreationCommands(Oid relationId, - IncludeSequenceDefaults includeSequenceDefaults); + IncludeSequenceDefaults includeSequenceDefaults, + bool creatingShellTableOnRemoteNode); extern List * GetPostLoadTableCreationCommands(Oid relationId, bool includeIndexes, bool includeReplicaIdentity); extern List * GetPreLoadTableCreationCommands(Oid relationId, IncludeSequenceDefaults diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 20822fa17..2ea790cbf 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -13,6 +13,7 @@ #define METADATA_SYNC_H +#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "nodes/pg_list.h" @@ -22,18 +23,23 @@ extern int MetadataSyncRetryInterval; typedef enum { - METADATA_SYNC_SUCCESS = 0, - METADATA_SYNC_FAILED_LOCK = 1, - METADATA_SYNC_FAILED_SYNC = 2 -} MetadataSyncResult; + NODE_METADATA_SYNC_SUCCESS = 0, + NODE_METADATA_SYNC_FAILED_LOCK = 1, + NODE_METADATA_SYNC_FAILED_SYNC = 2 +} NodeMetadataSyncResult; /* Functions declarations for metadata syncing */ -extern void StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort); +extern void SyncNodeMetadataToNode(const char *nodeNameString, int32 nodePort); +extern void SyncCitusTableMetadata(Oid relationId); +extern void EnsureSequentialModeMetadataOperations(void); extern bool ClusterHasKnownMetadataWorkers(void); +extern char * LocalGroupIdUpdateCommand(int32 groupId); extern bool ShouldSyncTableMetadata(Oid relationId); extern bool ShouldSyncTableMetadataViaCatalog(Oid relationId); -extern List * MetadataCreateCommands(void); -extern List * MetadataDropCommands(void); +extern List * NodeMetadataCreateCommands(void); +extern List * DistributedObjectMetadataSyncCommandList(void); +extern List * CitusTableMetadataCreateCommandList(Oid relationId); +extern List * NodeMetadataDropCommands(void); extern char * MarkObjectsDistributedCreateCommand(List *addresses, List *distributionArgumentIndexes, List *colocationIds, @@ -52,22 +58,30 @@ extern char * CreateSchemaDDLCommand(Oid schemaId); extern List * GrantOnSchemaDDLCommands(Oid schemaId); extern char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, int 
shardState, uint64 shardLength, int32 groupId); -extern void CreateTableMetadataOnWorkers(Oid relationId); -extern BackgroundWorkerHandle * SpawnSyncMetadataToNodes(Oid database, Oid owner); -extern void SyncMetadataToNodesMain(Datum main_arg); +extern TableDDLCommand * TruncateTriggerCreateCommand(Oid relationId); +extern void CreateInterTableRelationshipOfRelationOnWorkers(Oid relationId); +extern List * InterTableRelationshipOfRelationCommandList(Oid relationId); +extern List * DetachPartitionCommandList(void); +extern BackgroundWorkerHandle * SpawnSyncNodeMetadataToNodes(Oid database, Oid owner); +extern void SyncNodeMetadataToNodesMain(Datum main_arg); extern void SignalMetadataSyncDaemon(Oid database, int sig); extern bool ShouldInitiateMetadataSync(bool *lockFailure); +extern List * SequenceDependencyCommandList(Oid relationId); extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName); -extern List * SequenceDDLCommandsForTable(Oid relationId); extern List * GetSequencesFromAttrDef(Oid attrdefOid); extern void GetDependentSequencesWithRelation(Oid relationId, List **attnumList, List **dependentSequenceList, AttrNumber attnum); extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum); -#define DELETE_ALL_NODES "TRUNCATE pg_dist_node CASCADE" -#define DELETE_ALL_DISTRIBUTED_OBJECTS "TRUNCATE citus.pg_dist_object" +#define DELETE_ALL_NODES "DELETE FROM pg_dist_node" +#define DELETE_ALL_PLACEMENTS "DELETE FROM pg_dist_placement" +#define DELETE_ALL_SHARDS "DELETE FROM pg_dist_shard" +#define DELETE_ALL_DISTRIBUTED_OBJECTS "DELETE FROM citus.pg_dist_object" +#define DELETE_ALL_PARTITIONS "DELETE FROM pg_dist_partition" +#define REMOVE_ALL_SHELL_TABLES_COMMAND \ + "SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition" #define REMOVE_ALL_CITUS_TABLES_COMMAND \ "SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition" #define BREAK_CITUS_TABLE_SEQUENCE_DEPENDENCY_COMMAND \ diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index a5e78a2a4..d1db0f2fe 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -249,7 +249,7 @@ extern void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target); extern List * GetDistributableDependenciesForObject(const ObjectAddress *target); extern bool ShouldPropagate(void); extern bool ShouldPropagateObject(const ObjectAddress *address); -extern void ReplicateAllDependenciesToNode(const char *nodeName, int nodePort); +extern List * ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort); /* Remaining metadata utility functions */ extern char * TableOwner(Oid relationId); @@ -286,13 +286,7 @@ extern bool GetNodeDiskSpaceStatsForConnection(MultiConnection *connection, uint64 *availableBytes, uint64 *totalBytes); extern void ExecuteQueryViaSPI(char *query, int SPIOK); -extern void EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId); +extern void EnsureSequenceTypeSupported(Oid seqOid, Oid seqTypId, Oid ownerRelationId); extern void AlterSequenceType(Oid seqOid, Oid typeOid); -extern void MarkSequenceListDistributedAndPropagateWithDependencies(Oid relationId, - List *sequenceList); -extern void MarkSequenceDistributedAndPropagateWithDependencies(Oid relationId, Oid - sequenceOid); -extern void EnsureDistributedSequencesHaveOneType(Oid relationId, - List *dependentSequenceList, - List *attnumList); +extern void 
EnsureRelationHasCompatibleSequenceTypes(Oid relationId); #endif /* METADATA_UTILITY_H */ diff --git a/src/include/distributed/reference_table_utils.h b/src/include/distributed/reference_table_utils.h index 323f8e355..e8969eec0 100644 --- a/src/include/distributed/reference_table_utils.h +++ b/src/include/distributed/reference_table_utils.h @@ -25,6 +25,6 @@ extern void DeleteAllReplicatedTablePlacementsFromNodeGroup(int32 groupId, bool localOnly); extern int CompareOids(const void *leftElement, const void *rightElement); extern int ReferenceTableReplicationFactor(void); -extern void ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort); +extern void ReplicateAllReferenceTablesToNode(WorkerNode *workerNode); #endif /* REFERENCE_TABLE_UTILS_H_ */ diff --git a/src/include/distributed/worker_create_or_replace.h b/src/include/distributed/worker_create_or_replace.h index 403efc5dc..60323d172 100644 --- a/src/include/distributed/worker_create_or_replace.h +++ b/src/include/distributed/worker_create_or_replace.h @@ -14,8 +14,12 @@ #ifndef WORKER_CREATE_OR_REPLACE_H #define WORKER_CREATE_OR_REPLACE_H +#include "catalog/objectaddress.h" + #define CREATE_OR_REPLACE_COMMAND "SELECT worker_create_or_replace_object(%s);" extern char * WrapCreateOrReplace(const char *sql); +extern char * GenerateBackupNameForCollision(const ObjectAddress *address); +extern RenameStmt * CreateRenameStatement(const ObjectAddress *address, char *newName); #endif /* WORKER_CREATE_OR_REPLACE_H */ diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 82118f103..91d91a880 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -63,6 +63,7 @@ extern char *WorkerListFileName; extern char *CurrentCluster; extern bool ReplicateReferenceTablesOnActivate; +extern int ActivateNode(char *nodeName, int nodePort); /* Function declarations for finding worker nodes to place shards on */ extern WorkerNode * WorkerGetRandomCandidateNode(List *currentNodeList); @@ -103,6 +104,8 @@ extern WorkerNode * SetWorkerColumnLocalOnly(WorkerNode *workerNode, int columnI Datum value); extern uint32 CountPrimariesWithMetadata(void); extern WorkerNode * GetFirstPrimaryWorkerNode(void); +extern List * SyncDistributedObjectsCommandList(WorkerNode *workerNode); +extern List * PgDistTableMetadataSyncCommandList(void); /* Function declarations for worker node utilities */ extern int CompareWorkerNodes(const void *leftElement, const void *rightElement); diff --git a/src/test/regress/expected/citus_local_tables_mx.out b/src/test/regress/expected/citus_local_tables_mx.out index 254df3b73..8ea410829 100644 --- a/src/test/regress/expected/citus_local_tables_mx.out +++ b/src/test/regress/expected/citus_local_tables_mx.out @@ -43,12 +43,12 @@ FOR EACH ROW EXECUTE FUNCTION dummy_function(); NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1508000, 'citus_local_tables_mx', 'CREATE TRIGGER dummy_function_trigger BEFORE UPDATE OF value ON citus_local_table FOR EACH ROW EXECUTE FUNCTION dummy_function();') --- Show that we can sync metadata successfully. That means, we create +-- Show that we can activate node successfully. That means, we create -- the function that trigger needs in mx workers too. -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? 
--------------------------------------------------------------------- - + 1 (1 row) CREATE EXTENSION seg; diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index 6b6dc5098..2ab760014 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -25,13 +25,6 @@ SELECT create_distributed_table('notices', 'id'); (1 row) INSERT INTO notices VALUES (1, 'hello world'); --- Create the necessary test utility function -CREATE OR REPLACE FUNCTION master_metadata_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; -COMMENT ON FUNCTION master_metadata_snapshot() - IS 'commands to create the metadata snapshot'; CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$ @@ -595,13 +588,6 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', dist (1 row) --- show that we are able to propagate objects with multiple item on address arrays -SELECT * FROM (SELECT unnest(master_metadata_snapshot()) as metadata_command order by 1) as innerResult WHERE metadata_command like '%distributed_object_data%'; - metadata_command ---------------------------------------------------------------------- - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('type', ARRAY['public.usage_access_type']::text[], ARRAY[]::text[], -1, 0, false), ('type', ARRAY['function_tests.dup_result']::text[], ARRAY[]::text[], -1, 0, false), ('function', ARRAY['public', 'usage_access_func']::text[], ARRAY['public.usage_access_type', 'integer[]']::text[], -1, 0, false), ('function', ARRAY['public', 'usage_access_func_third']::text[], ARRAY['integer', 'integer[]']::text[], 0, 50, false), ('function', ARRAY['function_tests', 'notice']::text[], ARRAY['pg_catalog.text']::text[], -1, 0, false), ('function', ARRAY['function_tests', 'dup']::text[], ARRAY['pg_catalog.macaddr']::text[], 0, 52, false), ('function', ARRAY['function_tests', 'eq_with_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], 0, 52, false), ('function', ARRAY['function_tests', 'eq_mi''xed_param_names']::text[], ARRAY['pg_catalog.macaddr', 'pg_catalog.macaddr']::text[], -1, 0, false), ('function', ARRAY['function_tests', 'agg_sfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0, false), ('function', ARRAY['function_tests', 'agg_invfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0, false), ('function', ARRAY['function_tests', 'agg_finalfunc']::text[], ARRAY['integer', 'integer']::text[], -1, 0, false), ('aggregate', ARRAY['function_tests', 'my_rank']::text[], ARRAY['pg_catalog."any"']::text[], -1, 0, false), ('function', ARRAY['function_tests', 'agg_names_sfunc']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0, false), ('function', ARRAY['function_tests', 'agg_names_finalfunc']::text[], ARRAY['function_tests.dup_result']::text[], -1, 0, false), ('aggregate', ARRAY['function_tests', 'agg_names']::text[], ARRAY['function_tests.dup_result', 'function_tests.dup_result']::text[], -1, 0, false), ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('server', ARRAY['fake_fdw_server']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], 
ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['schema_colocation']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['function_tests']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['function_tests2']::text[], ARRAY[]::text[], -1, 0, false), ('extension', ARRAY['plpgsql']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; -(1 row) - -- valid distribution with distribution_arg_index SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)','$1'); create_distributed_function diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index f3856410a..7a74d91e5 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -42,7 +42,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); mitmproxy @@ -50,33 +50,33 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET grou (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. CONTEXT: while executing command on localhost:xxxxx -- Failure to drop all tables in pg_dist_partition -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: server closed the connection unexpectedly This probably means the server terminated abnormally before or while processing the request. 
CONTEXT: while executing command on localhost:xxxxx --- Failure to truncate pg_dist_node in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); +-- Failure to delete pg_dist_node entries from the worker +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -84,7 +84,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").can SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); mitmproxy --------------------------------------------------------------------- @@ -122,17 +122,17 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; f (1 row) --- Verify we can sync metadata after unsuccessful attempts +-- Verify we can activate node after unsuccessful attempts SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_2_proxy_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; @@ -224,8 +224,8 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) --- Failure to drop all tables in pg_dist_partition -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); +-- Failure to delete pg_dist_node entries from the worker +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -234,60 +234,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_tabl SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); NOTICE: dropping metadata on the node (localhost,9060) ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -NOTICE: dropping metadata on the node (localhost,9060) -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. 
-connection not open -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx -WARNING: server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -connection not open -connection not open -connection not open -connection not open -CONTEXT: while executing command on localhost:xxxxx - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - --- Failure to truncate pg_dist_node in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); - mitmproxy ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -NOTICE: dropping metadata on the node (localhost,9060) -ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); mitmproxy --------------------------------------------------------------------- @@ -344,7 +291,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; f (1 row) --- Verify we can drop metadata after unsuccessful attempts +-- Verify we can stop metadata sync after unsuccessful attempts SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_distributed_transaction_id.out b/src/test/regress/expected/isolation_distributed_transaction_id.out index 3dd129ad8..1e41f8957 100644 --- a/src/test/regress/expected/isolation_distributed_transaction_id.out +++ b/src/test/regress/expected/isolation_distributed_transaction_id.out @@ -68,7 +68,7 @@ step s3-commit: COMMIT; -starting permutation: s1-create-table s1-begin s1-insert s1-verify-current-xact-is-on-worker s1-commit +starting permutation: s1-create-table s1-begin s1-insert s1-verify-current-xact-is-on-worker s1-drop-table s1-commit step s1-create-table: -- some tests also use distributed table CREATE TABLE distributed_transaction_id_table(some_value int, other_value int); @@ -104,6 +104,9 @@ nodeport|xact_exists 57638|t (2 rows) +step s1-drop-table: + DROP TABLE distributed_transaction_id_table; + step s1-commit: COMMIT; diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index b137a56fb..d036468b7 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -118,7 +118,8 @@ pg_identify_object_as_address (database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) -(3 rows) +(table,"{public,t1}",{}) +(4 rows) count --------------------------------------------------------------------- @@ -310,7 +311,8 @@ pg_identify_object_as_address (database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) -(3 rows) +(table,"{public,t1}",{}) +(4 rows) count --------------------------------------------------------------------- @@ -502,7 +504,8 @@ pg_identify_object_as_address (database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) -(3 rows) +(table,"{public,t1}",{}) +(4 rows) count --------------------------------------------------------------------- @@ -690,7 +693,8 @@ pg_identify_object_as_address 
(role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) -(4 rows) +(table,"{myschema,t1}",{}) +(5 rows) count --------------------------------------------------------------------- @@ -884,7 +888,8 @@ pg_identify_object_as_address (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) -(4 rows) +(table,"{myschema,t1}",{}) +(5 rows) count --------------------------------------------------------------------- @@ -1078,7 +1083,8 @@ pg_identify_object_as_address (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) -(4 rows) +(table,"{myschema,t1}",{}) +(5 rows) count --------------------------------------------------------------------- @@ -1272,7 +1278,8 @@ pg_identify_object_as_address (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) -(4 rows) +(table,"{myschema,t1}",{}) +(5 rows) count --------------------------------------------------------------------- @@ -1480,7 +1487,9 @@ pg_identify_object_as_address (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) -(4 rows) +(table,"{myschema,t1}",{}) +(table,"{myschema,t2}",{}) +(6 rows) count --------------------------------------------------------------------- @@ -1697,7 +1706,9 @@ pg_identify_object_as_address (schema,{myschema},{}) (schema,{myschema2},{}) (schema,{public},{}) -(5 rows) +(table,"{myschema,t1}",{}) +(table,"{myschema2,t2}",{}) +(7 rows) count --------------------------------------------------------------------- @@ -2251,8 +2262,9 @@ pg_identify_object_as_address (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) +(table,"{myschema,t1}",{}) (type,{myschema.tt1},{}) -(5 rows) +(6 rows) count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_metadata_sync_deadlock.out b/src/test/regress/expected/isolation_metadata_sync_deadlock.out index ebca8f427..2ee9a986a 100644 --- a/src/test/regress/expected/isolation_metadata_sync_deadlock.out +++ b/src/test/regress/expected/isolation_metadata_sync_deadlock.out @@ -122,142 +122,3 @@ restore_isolation_tester_func (1 row) - -starting permutation: increase-retry-interval reload-conf s2-start-session-level-connection s2-begin-on-worker s2-truncate-on-worker s3-invalidate-metadata s3-resync s3-wait s1-count-daemons s1-cancel-metadata-sync s1-count-daemons reset-retry-interval reload-conf s2-commit-on-worker s2-stop-connection s3-resync s3-wait -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step increase-retry-interval: - ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 20000; - -step reload-conf: - SELECT pg_reload_conf(); - -pg_reload_conf ---------------------------------------------------------------------- -t -(1 row) - -step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); - -start_session_level_connection_to_node ---------------------------------------------------------------------- - -(1 row) - -step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); - -run_commands_on_session_level_connection_to_node ---------------------------------------------------------------------- - -(1 row) - -step s2-truncate-on-worker: - SELECT run_commands_on_session_level_connection_to_node('TRUNCATE t2'); - -run_commands_on_session_level_connection_to_node ---------------------------------------------------------------------- - -(1 row) - -step s3-invalidate-metadata: - update pg_dist_node SET metadatasynced = 
false; - -step s3-resync: - SELECT trigger_metadata_sync(); - -trigger_metadata_sync ---------------------------------------------------------------------- - -(1 row) - -step s3-wait: - SELECT pg_sleep(2); - -pg_sleep ---------------------------------------------------------------------- - -(1 row) - -step s1-count-daemons: - SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; - -count ---------------------------------------------------------------------- - 1 -(1 row) - -step s1-cancel-metadata-sync: - SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; - SELECT pg_sleep(2); - -pg_cancel_backend ---------------------------------------------------------------------- -t -(1 row) - -pg_sleep ---------------------------------------------------------------------- - -(1 row) - -step s1-count-daemons: - SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; - -count ---------------------------------------------------------------------- - 0 -(1 row) - -step reset-retry-interval: - ALTER SYSTEM RESET citus.metadata_sync_retry_interval; - -step reload-conf: - SELECT pg_reload_conf(); - -pg_reload_conf ---------------------------------------------------------------------- -t -(1 row) - -step s2-commit-on-worker: - SELECT run_commands_on_session_level_connection_to_node('COMMIT'); - -run_commands_on_session_level_connection_to_node ---------------------------------------------------------------------- - -(1 row) - -step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); - -stop_session_level_connection_to_node ---------------------------------------------------------------------- - -(1 row) - -step s3-resync: - SELECT trigger_metadata_sync(); - -trigger_metadata_sync ---------------------------------------------------------------------- - -(1 row) - -step s3-wait: - SELECT pg_sleep(2); - -pg_sleep ---------------------------------------------------------------------- - -(1 row) - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index c416e62d6..d28a6b714 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -90,10 +90,7 @@ query |query_hostname |query_hostport|distribute update ref_table set a = a + 1; |coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression - - update ref_table set a = a + 1; -|localhost | 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -(2 rows) +(1 row) step s2-view-worker: SELECT query, query_hostname, query_hostport, distributed_query_host_name, @@ -106,11 +103,9 @@ step s2-view-worker: query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- -UPDATE public.ref_table_1500775 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -UPDATE public.ref_table_1500775 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in 
transaction|Client |ClientRead|postgres|regression -UPDATE public.ref_table_1500775 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -UPDATE public.ref_table_1500775 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -(4 rows) +UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +(2 rows) step s2-end: END; @@ -146,12 +141,12 @@ step s2-active-transactions: count --------------------------------------------------------------------- - 2 + 1 (1 row) count --------------------------------------------------------------------- - 6 + 3 (1 row) step s1-end: diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index 69fdb8c0b..2658ca5ef 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -158,7 +158,7 @@ nodeid|groupid|nodename |nodeport master_run_on_worker --------------------------------------------------------------------- -(localhost,57638,t,"[{""f1"": 25, ""f2"": 25, ""f3"": ""localhost"", ""f4"": 58637}, {""f1"": 26, ""f2"": 26, ""f3"": ""localhost"", ""f4"": 57638}]") +(localhost,57638,t,"[{""f1"": 10, ""f2"": 10, ""f3"": ""localhost"", ""f4"": 57638}]") (1 row) nodeid|nodename|nodeport diff --git a/src/test/regress/expected/local_shard_copy.out b/src/test/regress/expected/local_shard_copy.out index 0e0998a9c..ec9828a04 100644 --- a/src/test/regress/expected/local_shard_copy.out +++ b/src/test/regress/expected/local_shard_copy.out @@ -52,17 +52,9 @@ SELECT create_distributed_table('collections_list', 'key'); CREATE TABLE collections_list_0 PARTITION OF collections_list (key, collection_id) FOR VALUES IN ( 0 ); -DEBUG: relation "collections_list_key_seq" already exists, skipping -DETAIL: from localhost:xxxxx -DEBUG: relation "collections_list_key_seq" already exists, skipping -DETAIL: from localhost:xxxxx CREATE TABLE collections_list_1 PARTITION OF collections_list (key, collection_id) FOR VALUES IN ( 1 ); -DEBUG: relation "collections_list_key_seq" already exists, skipping -DETAIL: from localhost:xxxxx -DEBUG: relation "collections_list_key_seq" already exists, skipping -DETAIL: from localhost:xxxxx -- connection worker and get ready for the tests \c - - - :worker_1_port SET search_path TO local_shard_copy; diff --git a/src/test/regress/expected/local_shard_execution_dropped_column.out b/src/test/regress/expected/local_shard_execution_dropped_column.out index be3ad6745..6a2fe1b0b 100644 --- a/src/test/regress/expected/local_shard_execution_dropped_column.out +++ b/src/test/regress/expected/local_shard_execution_dropped_column.out @@ -28,16 +28,16 @@ SELECT create_distributed_table('t1', 'c'); (1 row) ALTER TABLE t1 DROP COLUMN b; -SELECT start_metadata_sync_to_node('localhost',:worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost',:worker_1_port); + ?column? 
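-- Here and in later files, calls that previously read SELECT start_metadata_sync_to_node(...)
-- become SELECT 1 FROM citus_activate_node(...). A sketch of why the tests project a
-- constant: start_metadata_sync_to_node returns void, while citus_activate_node returns
-- the activated node's id, which depends on how many nodes were added before it, so the
-- bare call would make the expected output unstable across runs.
SELECT citus_activate_node('localhost', :worker_1_port);        -- prints a run-dependent nodeid
SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); -- always prints 1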
---------------------------------------------------------------------
-
+ 1
(1 row)

-SELECT start_metadata_sync_to_node('localhost',:worker_2_port);
- start_metadata_sync_to_node
+SELECT 1 FROM citus_activate_node('localhost',:worker_2_port);
+ ?column?
---------------------------------------------------------------------
-
+ 1
(1 row)

\c - - - :worker_1_port
diff --git a/src/test/regress/expected/master_copy_shard_placement.out b/src/test/regress/expected/master_copy_shard_placement.out
index 09fad3aa7..2e2f23e11 100644
--- a/src/test/regress/expected/master_copy_shard_placement.out
+++ b/src/test/regress/expected/master_copy_shard_placement.out
@@ -110,6 +110,8 @@ SELECT count(*) FROM history;
 -- test we can replicate MX tables
 SET citus.shard_replication_factor TO 1;
 -- metadata sync will succeed even if we have rep > 1 tables
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0);
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
  start_metadata_sync_to_node
---------------------------------------------------------------------
diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out
index 5d3d9f26e..5a5317e74 100644
--- a/src/test/regress/expected/multi_cluster_management.out
+++ b/src/test/regress/expected/multi_cluster_management.out
@@ -1,20 +1,15 @@
 SET citus.next_shard_id TO 1220000;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1;
-SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
 -- Tests functions related to cluster membership
 -- add the nodes to the cluster
 SELECT 1 FROM master_add_node('localhost', :worker_1_port);
-WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
-DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
  ?column?
---------------------------------------------------------------------
 1
(1 row)

 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
-DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
  ?column?
---------------------------------------------------------------------
 1
@@ -59,8 +54,6 @@ SELECT master_get_active_worker_nodes();
 -- try to disable a node with no placements see that the node is removed
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
-DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
  ?column?
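-- Because tables now ride the object-dependency machinery and are tracked in
-- citus.pg_dist_object, tests that fabricate distributed tables by editing catalogs
-- directly have to register them as distributed objects before syncing metadata, as the
-- master_copy_shard_placement hunk above does for public.articles. A sketch of the
-- registration pattern, using that same test table:
INSERT INTO citus.pg_dist_object(classid, objid, objsubid)
    VALUES ('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);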
--------------------------------------------------------------------- 1 @@ -109,12 +102,10 @@ SELECT * FROM rebalance_table_shards(); -- insert stuff into pg_dist_colocation TRUNCATE pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; -SELECT * FROM citus_activate_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created - citus_activate_node +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column? --------------------------------------------------------------------- - 3 + 1 (1 row) CREATE TABLE cluster_management_test (col_1 text, col_2 int); @@ -183,8 +174,6 @@ SELECT citus_disable_node('localhost.noexist', 2345); ERROR: node at "localhost.noexist:2345" does not exist -- drop the table without leaving a shard placement behind (messes up other tests) SELECT master_activate_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created master_activate_node --------------------------------------------------------------------- 3 @@ -192,7 +181,6 @@ DETAIL: distributed objects are only kept in sync when citus.enable_object_prop DROP TABLE test_reference_table, cluster_management_test; -- create users like this so results of community and enterprise are same -SET citus.enable_object_propagation TO ON; SET client_min_messages TO ERROR; CREATE USER non_super_user; CREATE USER node_metadata_user; @@ -204,7 +192,6 @@ SELECT 1 FROM run_command_on_workers('CREATE USER node_metadata_user'); (2 rows) RESET client_min_messages; -SET citus.enable_object_propagation TO OFF; GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user; @@ -269,7 +256,6 @@ SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_ ERROR: permission denied for function master_update_node -- try to manipulate node metadata via privileged user SET ROLE node_metadata_user; -SET citus.enable_object_propagation TO off; -- prevent master activate node to actually connect for this test SELECT 1 FROM master_add_node('localhost', :worker_2_port); ERROR: operation is not allowed HINT: Run the command with a superuser. @@ -307,7 +293,6 @@ SELECT nodename, nodeport, noderole FROM pg_dist_node ORDER BY nodeport; ABORT; \c - postgres - :master_port SET citus.next_shard_id TO 1220000; -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; SELECT master_get_active_worker_nodes(); @@ -318,8 +303,6 @@ SELECT master_get_active_worker_nodes(); -- restore the node for next tests SELECT * FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. 
Newly activated nodes will not get these objects created master_add_node --------------------------------------------------------------------- 7 @@ -496,8 +479,6 @@ SELECT * INTO old_placements FROM pg_dist_placement WHERE groupid = :worker_2_gr DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group; SELECT master_add_node('localhost', :worker_2_port) AS new_node \gset WARNING: could not find any shard placements for shardId 1220001 -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created WARNING: could not find any shard placements for shardId 1220001 WARNING: could not find any shard placements for shardId 1220003 WARNING: could not find any shard placements for shardId 1220005 @@ -555,8 +536,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? --------------------------------------------------------------------- 1 @@ -583,7 +562,6 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep (0 rows) \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- check that added nodes are not propagated to nodes without metadata SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); NOTICE: dropping metadata on the node (localhost,57637) @@ -593,8 +571,6 @@ NOTICE: dropping metadata on the node (localhost,57637) (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? --------------------------------------------------------------------- 1 @@ -607,7 +583,6 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep (0 rows) \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- check that removing two nodes in the same transaction works SELECT master_remove_node('localhost', :worker_1_port), @@ -627,10 +602,6 @@ SELECT count(1) FROM pg_dist_node; SELECT master_add_node('localhost', :worker_1_port), master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. 
Newly activated nodes will not get these objects created master_add_node | master_add_node --------------------------------------------------------------------- 11 | 12 @@ -652,8 +623,6 @@ SELECT master_remove_node('localhost', :worker_2_port); (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? --------------------------------------------------------------------- 1 @@ -679,8 +648,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? --------------------------------------------------------------------- 1 @@ -693,8 +660,6 @@ SELECT master_remove_node('localhost', :worker_2_port); (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? --------------------------------------------------------------------- 1 @@ -715,7 +680,6 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep (1 row) \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; master_remove_node --------------------------------------------------------------------- @@ -724,16 +688,12 @@ SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; (2 rows) SELECT 1 FROM master_add_node('localhost', :worker_1_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? --------------------------------------------------------------------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? --------------------------------------------------------------------- 1 @@ -749,8 +709,6 @@ SELECT master_remove_node('localhost', :worker_2_port); BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created ?column? 
--------------------------------------------------------------------- 1 @@ -793,7 +751,6 @@ DELETE FROM pg_dist_shard; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_node; \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); NOTICE: dropping metadata on the node (localhost,57637) stop_metadata_sync_to_node diff --git a/src/test/regress/expected/multi_colocation_utils.out b/src/test/regress/expected/multi_colocation_utils.out index e93bb9a15..815908799 100644 --- a/src/test/regress/expected/multi_colocation_utils.out +++ b/src/test/regress/expected/multi_colocation_utils.out @@ -1,5 +1,7 @@ SET citus.next_shard_id TO 1300000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4; +-- Delete orphaned entries from pg_dist_colocation +DELETE FROM pg_dist_colocation where colocationid = 5 or colocationid = 6; -- =================================================================== -- create test utility function -- =================================================================== @@ -968,31 +970,22 @@ SELECT update_distributed_table_colocation('table1_group_none', colocate_with => (1 row) --- sync metadata to get rid of inconsistencies in pg_dist tables -select stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node +-- activate nodes to get rid of inconsistencies in pg_dist tables +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table1_group1'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table2_group1'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table3_group2'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table4_group2'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table5_groupX'::regclass::oid, 0); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) -select stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column? 
--------------------------------------------------------------------- - -(1 row) - -select start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -select start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - + 1 (1 row) -- move a table with a colocation id which is already not in pg_dist_colocation diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index b3eb7baf8..7caceeeda 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -492,6 +492,7 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Test downgrade to 9.4-1 from 9.5-1 ALTER EXTENSION citus UPDATE TO '9.5-1'; BEGIN; + SET citus.enable_object_propagation TO on; SELECT master_add_node('localhost', :master_port, groupId=>0); NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata master_add_node @@ -507,6 +508,7 @@ NOTICE: create_citus_local_table is deprecated in favour of citus_add_local_tab (1 row) + RESET citus.enable_object_propagation; -- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table ALTER EXTENSION citus UPDATE TO '9.4-1'; ERROR: citus local tables are introduced in Citus 9.5 @@ -1010,7 +1012,8 @@ SELECT * FROM multi_extension.print_extension_changes(); | function citus_shards_on_worker() SETOF record | function create_distributed_function(regprocedure,text,text,boolean) void | function worker_drop_sequence_dependency(text) void -(14 rows) + | function worker_drop_shell_table(text) void +(15 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version @@ -1260,10 +1263,7 @@ CREATE EXTENSION citus; \c - - - :worker_1_port CREATE EXTENSION citus; \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent distributed transactions during add node SELECT FROM master_add_node('localhost', :worker_1_port); -WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker -DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created -- (1 row) diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index 49ed3d1fc..98e5437a6 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -89,10 +89,10 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' A \c - - - :master_port -- this should work properly -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? 
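-- The multi_extension.out hunk above records the new UDF worker_drop_shell_table(text),
-- which replaces the worker_drop_distributed_table call in the metadata teardown path:
-- it drops a distributed table's shell table on a worker without touching shard data.
-- The activate-node snapshots later in this patch invoke it over every distributed
-- relation, as sketched here:
SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition;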
--------------------------------------------------------------------- - + 1 (1 row) \c - - - :worker_1_port @@ -670,14 +670,18 @@ NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing ALTER TABLE fix_idx_names.p2 OWNER TO postgres DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's') -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's') -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_create_truncate_trigger('fix_idx_names.p2') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's') +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370000, 's') +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; @@ -690,6 +694,14 @@ NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing 
SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET search_path TO fix_idx_names,public; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET search_path TO fix_idx_names,public; diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 74c996c36..5680b73c3 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -26,30 +26,67 @@ ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function -CREATE FUNCTION master_metadata_snapshot() +CREATE FUNCTION activate_node_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; -COMMENT ON FUNCTION master_metadata_snapshot() - IS 'commands to create the metadata snapshot'; +COMMENT ON FUNCTION activate_node_snapshot() + IS 'commands to activate node snapshot'; -- Show that none of the existing tables are qualified to be MX tables SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted --------------------------------------------------------------------- (0 rows) --- Show that, with no MX tables, metadata snapshot contains only the delete commands, --- pg_dist_node entries and reference tables -SELECT unnest(master_metadata_snapshot()) order by 1; - unnest +-- Since password_encryption default has been changed to sha from md5 with PG14 +-- we are updating it manually just for consistent test results between PG versions. +ALTER SYSTEM SET password_encryption TO md5; +SELECT pg_reload_conf(); + pg_reload_conf --------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword'; +-- Show that, with no MX tables, activate node snapshot contains only the delete commands, +-- pg_dist_node entries, pg_dist_object entries and roles. 
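-- activate_node_snapshot() is the renamed master_metadata_snapshot() test helper
-- defined just above (still backed by the citus C library). It returns the full list
-- of commands that activating a node would replay, which the test unnests into sorted
-- rows below; the same array can also be filtered for one command of interest, e.g.:
SELECT cmd FROM unnest(activate_node_snapshot()) cmd
WHERE cmd LIKE '%citus_internal_add_object_metadata%';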
+SELECT unnest(activate_node_snapshot()) order by 1; + unnest +--------------------------------------------------------------------- + CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres + DELETE FROM citus.pg_dist_object + DELETE FROM pg_dist_node + DELETE FROM pg_dist_partition + DELETE FROM pg_dist_placement + DELETE FROM pg_dist_shard + GRANT CREATE ON SCHEMA public TO PUBLIC; + GRANT CREATE ON SCHEMA public TO postgres; + GRANT USAGE ON SCHEMA public TO PUBLIC; + GRANT USAGE ON SCHEMA public TO postgres; INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) + RESET ROLE + RESET ROLE + SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition - SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition - TRUNCATE citus.pg_dist_object - TRUNCATE pg_dist_node CASCADE + SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') + SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition + SET ROLE postgres + SET ROLE postgres + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + UPDATE pg_dist_local_group SET groupid = 1 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; -(6 rows) +(27 rows) -- this function is dropped in Citus10, added here for tests CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, @@ -62,111 +99,164 @@ COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regcla distribution_column text, distribution_method citus.distribution_type) IS 'define the table distribution functions'; --- this function is dropped in Citus10, added here for tests -CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer, - replication_factor integer DEFAULT 2) - RETURNS void - AS 'citus', $$master_create_worker_shards$$ - LANGUAGE C STRICT; -- Create a test table with constraints and SERIAL and default from user defined sequence CREATE SEQUENCE user_defined_seq; CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq')); -SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - -(1 
row) - -SELECT master_create_worker_shards('mx_test_table', 8, 1); - master_create_worker_shards +set citus.shard_count to 8; +set citus.shard_replication_factor to 1; +SELECT create_distributed_table('mx_test_table', 'col_1'); + create_distributed_table --------------------------------------------------------------------- (1 row) +reset citus.shard_count; +reset citus.shard_replication_factor; -- Set the replication model of the test table to streaming replication so that it is -- considered as an MX table UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; --- Show that the created MX table is included in the metadata snapshot -SELECT unnest(master_metadata_snapshot()) order by 1; - unnest +-- Show that the created MX table is and its sequences are included in the activate node snapshot +SELECT unnest(activate_node_snapshot()) order by 1; + unnest --------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres - ALTER TABLE public.mx_test_table OWNER TO postgres + CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) + DELETE FROM citus.pg_dist_object + DELETE FROM pg_dist_node + DELETE FROM pg_dist_partition + DELETE FROM pg_dist_placement + DELETE FROM pg_dist_shard + GRANT CREATE ON SCHEMA public TO PUBLIC; + GRANT CREATE ON SCHEMA public TO postgres; + GRANT USAGE ON SCHEMA public TO PUBLIC; + GRANT USAGE ON SCHEMA public TO postgres; INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 0, 's') + RESET ROLE + RESET ROLE + SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') + SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') + SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('public.mx_test_table') - SELECT 
worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition - TRUNCATE citus.pg_dist_object - TRUNCATE pg_dist_node CASCADE - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition + SET ROLE postgres + SET ROLE postgres + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + UPDATE pg_dist_local_group SET groupid = 1 + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(19 rows) +(39 rows) --- Show that CREATE INDEX commands are included in the metadata snapshot +-- Show that CREATE INDEX commands are included in the activate node snapshot CREATE INDEX 
mx_index ON mx_test_table(col_2); -SELECT unnest(master_metadata_snapshot()) order by 1; - unnest +SELECT unnest(activate_node_snapshot()) order by 1; + unnest --------------------------------------------------------------------- ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres - ALTER TABLE public.mx_test_table OWNER TO postgres CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) + CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) + DELETE FROM citus.pg_dist_object + DELETE FROM pg_dist_node + DELETE FROM pg_dist_partition + DELETE FROM pg_dist_placement + DELETE FROM pg_dist_shard + GRANT CREATE ON SCHEMA public TO PUBLIC; + GRANT CREATE ON SCHEMA public TO postgres; + GRANT USAGE ON SCHEMA public TO PUBLIC; + GRANT USAGE ON SCHEMA public TO postgres; INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 0, 's') + RESET ROLE + RESET ROLE + SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') + SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') + SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('public.mx_test_table') - SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition - TRUNCATE citus.pg_dist_object - TRUNCATE pg_dist_node CASCADE - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition + SET ROLE postgres + SET ROLE postgres + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + UPDATE pg_dist_local_group SET groupid = 1 + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(20 rows) +(40 rows) --- Show that schema changes are included in the metadata snapshot +-- Show that schema changes are included in the activate node snapshot CREATE SCHEMA mx_testing_schema; ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; -SELECT unnest(master_metadata_snapshot()) order by 1; - unnest +SELECT unnest(activate_node_snapshot()) order by 1; + unnest --------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING 
btree (col_2) + CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres + CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) + DELETE FROM citus.pg_dist_object + DELETE FROM pg_dist_node + DELETE FROM pg_dist_partition + DELETE FROM pg_dist_placement + DELETE FROM pg_dist_shard + GRANT CREATE ON SCHEMA public TO PUBLIC; + GRANT CREATE ON SCHEMA public TO postgres; + GRANT USAGE ON SCHEMA public TO PUBLIC; + GRANT USAGE ON SCHEMA public TO postgres; INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 0, 's') + RESET ROLE + RESET ROLE + SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') + SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') + SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition - TRUNCATE citus.pg_dist_object - TRUNCATE pg_dist_node CASCADE - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition + SET ROLE postgres + SET ROLE postgres + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation 
TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + UPDATE pg_dist_local_group SET groupid = 1 + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(20 rows) +(41 rows) --- Show that append distributed tables are not included in the metadata snapshot +-- Show that append distributed tables are not included in the activate node snapshot CREATE TABLE non_mx_test_table (col_1 int, col_2 text); SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); master_create_distributed_table @@ -175,59 +265,101 @@ SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(master_metadata_snapshot()) order by 1; - unnest +SELECT unnest(activate_node_snapshot()) order by 1; + unnest --------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO 
postgres - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) + CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres + CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) + DELETE FROM citus.pg_dist_object + DELETE FROM pg_dist_node + DELETE FROM pg_dist_partition + DELETE FROM pg_dist_placement + DELETE FROM pg_dist_shard + GRANT CREATE ON SCHEMA public TO PUBLIC; + GRANT CREATE ON SCHEMA public TO postgres; + GRANT USAGE ON SCHEMA public TO PUBLIC; + GRANT USAGE ON SCHEMA public TO postgres; INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 0, 's') + RESET ROLE + RESET ROLE + SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') + SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') + SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition - TRUNCATE citus.pg_dist_object - TRUNCATE pg_dist_node CASCADE - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM 
pg_dist_partition + SET ROLE postgres + SET ROLE postgres + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + UPDATE pg_dist_local_group SET groupid = 1 + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(20 rows) +(41 rows) --- Show that range distributed tables are not included in the metadata snapshot +-- Show that range distributed tables are not included in the activate node snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(master_metadata_snapshot()) order by 1; - unnest +SELECT unnest(activate_node_snapshot()) order by 1; + unnest --------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.user_defined_seq OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree 
(col_2) + CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres + CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) + DELETE FROM citus.pg_dist_object + DELETE FROM pg_dist_node + DELETE FROM pg_dist_partition + DELETE FROM pg_dist_placement + DELETE FROM pg_dist_shard + GRANT CREATE ON SCHEMA public TO PUBLIC; + GRANT CREATE ON SCHEMA public TO postgres; + GRANT USAGE ON SCHEMA public TO PUBLIC; + GRANT USAGE ON SCHEMA public TO postgres; INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE),(2, 2, 'localhost', 57638, 'default', FALSE, FALSE, TRUE, 'primary'::noderole, 'default', TRUE) - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 0, 's') + RESET ROLE + RESET ROLE + SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') + SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') + SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') - SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition - TRUNCATE citus.pg_dist_object - TRUNCATE pg_dist_node CASCADE - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition + SET ROLE postgres + SET ROLE postgres + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 
'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + UPDATE pg_dist_local_group SET groupid = 1 + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 2, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 2, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 2, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(20 rows) +(41 rows) --- Test start_metadata_sync_to_node UDF +-- Test start_metadata_sync_to_node and citus_activate_node UDFs -- Ensure that hasmetadata=false for all nodes SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; count @@ -235,7 +367,7 @@ SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; 0 (1 row) --- Ensure it works when run on a secondary node +-- Show that metadata can not be synced on secondary node SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); master_add_node @@ -252,7 +384,7 @@ SELECT start_metadata_sync_to_node('localhost', 8888); SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; hasmetadata --------------------------------------------------------------------- - t + f (1 row) SELECT stop_metadata_sync_to_node('localhost', 8888); @@ -275,11 +407,12 @@ SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, 5 (1 row) --- Run 
start_metadata_sync_to_node and check that it marked hasmetadata for that worker -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +\c - - - :master_port +-- Run start_metadata_sync_to_node and citus_activate_node and check that it marked hasmetadata for that worker +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; @@ -306,9 +439,9 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (4 rows) SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted + logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted --------------------------------------------------------------------- - mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s | f + mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 2 | s | f (1 row) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; @@ -373,7 +506,7 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table': 1 (1 row) --- Make sure that start_metadata_sync_to_node considers foreign key constraints +-- Make sure that citus_activate_node considers foreign key constraints \c - - - :master_port -- Since we're superuser, we can set the replication model to 'streaming' to -- create some MX tables @@ -394,10 +527,10 @@ SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) -- Check that foreign key metadata exists on the worker @@ -412,18 +545,18 @@ SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schem DROP TABLE mx_testing_schema_2.fk_test_2; DROP TABLE mx_testing_schema.fk_test_1; RESET citus.shard_replication_factor; --- Check that repeated calls to start_metadata_sync_to_node has no side effects +-- Check that repeated calls to citus_activate_node has no side effects \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? 
--------------------------------------------------------------------- - + 1 (1 row) \c - - - :worker_1_port @@ -443,9 +576,9 @@ SELECT * FROM pg_dist_node ORDER BY nodeid; (4 rows) SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted + logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted --------------------------------------------------------------------- - mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s | f + mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 2 | s | f (1 row) SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid; @@ -503,13 +636,13 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table': 1 (1 row) --- Make sure that start_metadata_sync_to_node can be called inside a transaction and rollbacked +-- Make sure that citus_activate_node can be called inside a transaction and rollbacked \c - - - :master_port BEGIN; -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) ROLLBACK; @@ -522,10 +655,10 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; -- Check that the distributed table can be queried from the worker \c - - - :master_port SET citus.shard_replication_factor TO 1; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) CREATE TABLE mx_query_test (a int, b text, c int); @@ -996,10 +1129,10 @@ SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) DROP TABLE mx_table_with_small_sequence; @@ -1105,10 +1238,10 @@ SELECT nextval('mx_table_with_sequence_c_seq'); -- Check that adding a new metadata node sets the sequence space correctly \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) \c - - - :worker_2_port @@ -1224,7 +1357,7 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; -- Check that MX sequences play well with non-super users \c - - - :master_port -- Remove a node so that shards and sequences won't be created on table creation. 
Therefore, --- we can test that start_metadata_sync_to_node can actually create the sequence with proper +-- we can test that citus_activate_node can actually create the sequence with proper -- owner CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement; CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition; @@ -1268,12 +1401,6 @@ SELECT master_add_node('localhost', :worker_2_port); 6 (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - \c - mx_user - :worker_1_port SELECT nextval('mx_table_b_seq'); nextval @@ -1674,12 +1801,6 @@ SELECT master_add_node('localhost', :worker_2_port); 7 (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - CREATE SEQUENCE mx_test_sequence_0; CREATE SEQUENCE mx_test_sequence_1; -- test create_distributed_table @@ -1697,8 +1818,8 @@ ERROR: cannot execute ALTER TABLE command involving partition column ALTER TABLE test_table ADD COLUMN id2 int DEFAULT nextval('mx_test_sequence_1'); ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT; ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1'); -SELECT unnest(master_metadata_snapshot()) order by 1; - unnest +SELECT unnest(activate_node_snapshot()) order by 1; + unnest --------------------------------------------------------------------- ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres ALTER SEQUENCE public.mx_test_sequence_0 OWNER TO postgres @@ -1707,33 +1828,44 @@ SELECT unnest(master_metadata_snapshot()) order by 1; ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint_2 FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) NOT VALID ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_table_1_col1_key UNIQUE (col1) ALTER TABLE mx_test_schema_1.mx_table_1 OWNER TO postgres - ALTER TABLE mx_test_schema_1.mx_table_1 OWNER TO postgres ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1) ALTER TABLE mx_test_schema_2.mx_table_2 OWNER TO postgres - ALTER TABLE mx_test_schema_2.mx_table_2 OWNER TO postgres ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres - ALTER TABLE public.dist_table_1 OWNER TO postgres ALTER TABLE public.dist_table_1 OWNER TO postgres ALTER TABLE public.mx_ref OWNER TO postgres - ALTER TABLE public.mx_ref OWNER TO postgres - ALTER TABLE public.test_table OWNER TO postgres ALTER TABLE public.test_table OWNER TO postgres CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 USING btree (col1) CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 USING btree (col2) + CREATE SCHEMA IF NOT EXISTS mx_test_schema_1 AUTHORIZATION postgres + CREATE SCHEMA IF NOT EXISTS mx_test_schema_2 AUTHORIZATION postgres + CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres + CREATE SCHEMA IF NOT EXISTS mx_testing_schema_2 AUTHORIZATION postgres + CREATE SCHEMA IF NOT EXISTS public AUTHORIZATION postgres 
CREATE TABLE mx_test_schema_1.mx_table_1 (col1 integer, col2 text, col3 integer) CREATE TABLE mx_test_schema_2.mx_table_2 (col1 integer, col2 text) CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL, col_4 bigint DEFAULT nextval('public.user_defined_seq'::regclass)) CREATE TABLE public.dist_table_1 (a integer) CREATE TABLE public.mx_ref (col_1 integer, col_2 text) CREATE TABLE public.test_table (id integer DEFAULT worker_nextval('public.mx_test_sequence_0'::regclass), id2 integer DEFAULT worker_nextval('public.mx_test_sequence_1'::regclass)) + DELETE FROM citus.pg_dist_object + DELETE FROM pg_dist_node + DELETE FROM pg_dist_partition + DELETE FROM pg_dist_placement + DELETE FROM pg_dist_shard + GRANT CREATE ON SCHEMA public TO PUBLIC; + GRANT CREATE ON SCHEMA public TO postgres; + GRANT USAGE ON SCHEMA public TO PUBLIC; + GRANT USAGE ON SCHEMA public TO postgres; INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, metadatasynced, isactive, noderole, nodecluster, shouldhaveshards) VALUES (4, 1, 'localhost', 8888, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'default', TRUE),(5, 1, 'localhost', 8889, 'default', FALSE, FALSE, TRUE, 'secondary'::noderole, 'second-cluster', TRUE),(1, 1, 'localhost', 57637, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE),(7, 5, 'localhost', 57638, 'default', TRUE, TRUE, TRUE, 'primary'::noderole, 'default', TRUE) - SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 3, 's') - SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 3, 's') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 0, 's') + RESET ROLE + RESET ROLE + SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') + SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 4, 's') + SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 4, 's') + SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10005, 's') SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10003, 't') SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10005, 's') @@ -1743,16 +1875,24 @@ SELECT unnest(master_metadata_snapshot()) order by 1; SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_0 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.mx_test_sequence_1 AS integer INCREMENT BY 1 MINVALUE 1 MAXVALUE 2147483647 START WITH 1 CACHE 1 NO CYCLE','integer') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS public.user_defined_seq AS bigint INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 CACHE 1 NO CYCLE','bigint') + SELECT worker_create_or_alter_role('postgres', null, 'ALTER ROLE postgres SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 0 PASSWORD ''md5c53670dddfc3bb4b5675c7872bc2249a'' VALID UNTIL ''2052-05-05 00:00:00-07''') SELECT 
worker_create_truncate_trigger('mx_test_schema_1.mx_table_1') SELECT worker_create_truncate_trigger('mx_test_schema_2.mx_table_2') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') SELECT worker_create_truncate_trigger('public.dist_table_1') SELECT worker_create_truncate_trigger('public.mx_ref') SELECT worker_create_truncate_trigger('public.test_table') - SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition - TRUNCATE citus.pg_dist_object - TRUNCATE pg_dist_node CASCADE - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + SELECT worker_drop_shell_table(logicalrelid::regclass::text) FROM pg_dist_partition + SET ROLE postgres + SET ROLE postgres + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'off' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + SET citus.enable_ddl_propagation TO 'on' + UPDATE pg_dist_local_group SET groupid = 1 + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false), ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false), ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false), ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, 
false), ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310000, 1, 0, 1, 100000), (1310001, 1, 0, 5, 100001), (1310002, 1, 0, 1, 100002), (1310003, 1, 0, 5, 100003), (1310004, 1, 0, 1, 100004), (1310005, 1, 0, 5, 100005), (1310006, 1, 0, 1, 100006), (1310007, 1, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310020, 1, 0, 1, 100020), (1310021, 1, 0, 5, 100021), (1310022, 1, 0, 1, 100022), (1310023, 1, 0, 5, 100023), (1310024, 1, 0, 1, 100024)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1310025, 1, 0, 1, 100025), (1310026, 1, 0, 5, 100026), (1310027, 1, 0, 1, 100027), (1310028, 1, 0, 5, 100028), (1310029, 1, 0, 1, 100029)) SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; @@ -1765,7 +1905,7 @@ SELECT unnest(master_metadata_snapshot()) order by 1; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310074, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310075, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310076, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310073, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310083, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310084, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310085, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310086, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -(65 rows) +(84 rows) -- shouldn't work since test_table is MX ALTER TABLE test_table ADD COLUMN id3 bigserial; @@ -1784,13 +1924,16 @@ ALTER TABLE test_table ADD COLUMN id4 bigserial CHECK (id4 > 0); ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes when metadata is synchronized to workers \c - - - :worker_1_port \ds - List of relations - Schema | Name | Type | Owner + List of relations + Schema | Name | Type | Owner --------------------------------------------------------------------- - public | mx_test_sequence_0 | sequence | postgres - public | mx_test_sequence_1 | sequence | postgres - public | user_defined_seq | sequence | 
postgres -(3 rows) + public | mx_test_sequence_0 | sequence | postgres + public | mx_test_sequence_1 | sequence | postgres + public | mx_test_table_col_3_seq | sequence | postgres + public | sequence_rollback | sequence | postgres + public | sequence_rollback(citus_backup_0) | sequence | postgres + public | user_defined_seq | sequence | postgres +(6 rows) \c - - - :master_port CREATE SEQUENCE local_sequence; @@ -1802,11 +1945,14 @@ DETAIL: drop cascades to default value for column id2 of table test_table drop cascades to default value for column id of table test_table \c - - - :worker_1_port \ds - List of relations - Schema | Name | Type | Owner + List of relations + Schema | Name | Type | Owner --------------------------------------------------------------------- - public | user_defined_seq | sequence | postgres -(1 row) + public | mx_test_table_col_3_seq | sequence | postgres + public | sequence_rollback | sequence | postgres + public | sequence_rollback(citus_backup_0) | sequence | postgres + public | user_defined_seq | sequence | postgres +(4 rows) \c - - - :master_port DROP TABLE test_table CASCADE; @@ -1917,36 +2063,43 @@ SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_sup (2 rows) SET ROLE non_super_metadata_user; +-- user must be a superuser to stop/start metadata sync +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +ERROR: operation is not allowed +HINT: Run the command with a superuser. +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +ERROR: operation is not allowed +HINT: Run the command with a superuser. +RESET ROLE; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); NOTICE: dropping metadata on the node (localhost,57637) stop_metadata_sync_to_node --------------------------------------------------------------------- -(1 row) + (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- -(1 row) + (1 row) -RESET ROLE; RESET citus.shard_count; RESET citus.shard_replication_factor; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; --- Turn metadata sync back on at the end -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +-- Activate them at the end +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column?
--------------------------------------------------------------------- - + 1 (1 row) diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index f08fc78b9..33aec18d8 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -49,11 +49,17 @@ SET citus.shard_count TO 8; SET citus.next_shard_id TO 7000000; SET citus.next_placement_id TO 7000000; SET client_min_messages TO WARNING; --- test that coordinator pg_dist_node entry is synced to the workers -SELECT wait_until_metadata_sync(30000); - wait_until_metadata_sync +-- test that metadata is synced to the workers +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- + 1 +(1 row) +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port), @@ -335,11 +341,17 @@ SELECT master_remove_node('localhost', :master_port); (1 row) --- test that coordinator pg_dist_node entry was removed from the workers -SELECT wait_until_metadata_sync(30000); - wait_until_metadata_sync +-- test that metadata is synced to the workers +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- + 1 +(1 row) +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 (1 row) SELECT verify_metadata('localhost', :worker_1_port), diff --git a/src/test/regress/expected/multi_partitioning.out b/src/test/regress/expected/multi_partitioning.out index 33a7d8cdf..a2c03ef7a 100644 --- a/src/test/regress/expected/multi_partitioning.out +++ b/src/test/regress/expected/multi_partitioning.out @@ -4301,10 +4301,10 @@ WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_% (2 rows) -- should work properly - no names clashes -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) \c - - - :worker_1_port diff --git a/src/test/regress/expected/multi_sequence_default.out b/src/test/regress/expected/multi_sequence_default.out index e00310b0e..7f3c23673 100644 --- a/src/test/regress/expected/multi_sequence_default.out +++ b/src/test/regress/expected/multi_sequence_default.out @@ -130,72 +130,6 @@ ALTER SEQUENCE seq_0 AS bigint; ERROR: Altering a distributed sequence is currently not supported. ALTER SEQUENCE seq_0_local_table AS bigint; ERROR: Altering a distributed sequence is currently not supported. 
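Context for the ERROR above: once a sequence backs a column of a distributed table, each node hands out values from a disjoint slice of the sequence's range (the worker_apply_sequence_command calls in the metadata snapshots earlier in this patch set that up, and the seqmin/seqmax checks later in this file verify it). Retyping the sequence would invalidate the ranges already dealt out, so Citus rejects ALTER SEQUENCE ... AS on any distributed sequence. A minimal sketch of that behavior; the object names here are illustrative, not taken from the test:

    -- illustrative names, not part of the regression suite
    CREATE SEQUENCE seq_demo;
    CREATE TABLE seq_demo_table (x int, y bigint DEFAULT nextval('seq_demo'));
    SELECT create_distributed_table('seq_demo_table', 'x');
    -- seq_demo is now distributed alongside its table; each worker owns a
    -- distinct nextval() range, so changing the sequence's type is refused:
    ALTER SEQUENCE seq_demo AS int;
    -- ERROR:  Altering a distributed sequence is currently not supported.

The block removed below exercised sequence DDL while metadata sync was turned off; since this patch removes the stop/start-metadata escape hatch and keeps metadata on every activated node, that test goes with it.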
--- we can change other things like increment --- if metadata is not synced to workers -BEGIN; -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -CREATE SEQUENCE seq_13; -CREATE SEQUENCE seq_13_local_table; -CREATE TABLE seq_test_13 (x int, y int); -CREATE TABLE seq_test_13_local_table (x int, y int); -SELECT create_distributed_table('seq_test_13','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_add_local_table_to_metadata('seq_test_13_local_table'); - citus_add_local_table_to_metadata ---------------------------------------------------------------------- - -(1 row) - -ALTER TABLE seq_test_13 ADD COLUMN z int DEFAULT nextval('seq_13'); -ALTER TABLE seq_test_13_local_table ADD COLUMN z int DEFAULT nextval('seq_13_local_table'); -ALTER SEQUENCE seq_13 INCREMENT BY 2; -ALTER SEQUENCE seq_13_local_table INCREMENT BY 2; -\d seq_13 - Sequence "sequence_default.seq_13" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ---------------------------------------------------------------------- - integer | 1 | 1 | 2147483647 | 2 | no | 1 - -\d seq_13_local_table - Sequence "sequence_default.seq_13_local_table" - Type | Start | Minimum | Maximum | Increment | Cycles? | Cache ---------------------------------------------------------------------- - integer | 1 | 1 | 2147483647 | 2 | no | 1 - --- check that we can add serial pseudo-type columns --- when metadata is not synced to workers -TRUNCATE seq_test_0; -ALTER TABLE seq_test_0 ADD COLUMN w00 smallserial; -ALTER TABLE seq_test_0 ADD COLUMN w01 serial2; -ALTER TABLE seq_test_0 ADD COLUMN w10 serial; -ALTER TABLE seq_test_0 ADD COLUMN w11 serial4; -ALTER TABLE seq_test_0 ADD COLUMN w20 bigserial; -ALTER TABLE seq_test_0 ADD COLUMN w21 serial8; -TRUNCATE seq_test_0_local_table; -ALTER TABLE seq_test_0_local_table ADD COLUMN w00 smallserial; -ALTER TABLE seq_test_0_local_table ADD COLUMN w01 serial2; -ALTER TABLE seq_test_0_local_table ADD COLUMN w10 serial; -ALTER TABLE seq_test_0_local_table ADD COLUMN w11 serial4; -ALTER TABLE seq_test_0_local_table ADD COLUMN w20 bigserial; -ALTER TABLE seq_test_0_local_table ADD COLUMN w21 serial8; -ROLLBACK; -- check alter column type precaution ALTER TABLE seq_test_0 ALTER COLUMN z TYPE bigint; ERROR: cannot execute ALTER COLUMN TYPE .. 
command because the column involves a default coming from a sequence @@ -216,12 +150,6 @@ SELECT create_distributed_table('seq_test_4','x'); CREATE SEQUENCE seq_4; ALTER TABLE seq_test_4 ADD COLUMN a bigint DEFAULT nextval('seq_4'); -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - DROP SEQUENCE seq_4 CASCADE; NOTICE: drop cascades to default value for column a of table seq_test_4 TRUNCATE seq_test_4; @@ -770,10 +698,10 @@ SELECT create_reference_table('seq_test_10'); INSERT INTO seq_test_10 VALUES (0); CREATE TABLE seq_test_11 (col0 int, col1 bigint DEFAULT nextval('seq_11'::text)); -- works but doesn't create seq_11 in the workers -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) -- works because there is no dependency created between seq_11 and seq_test_10 @@ -812,10 +740,10 @@ SELECT create_distributed_table('seq_test_12', 'col0'); (1 row) -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) INSERT INTO seq_test_12 VALUES ('hello0') RETURNING *; @@ -918,10 +846,10 @@ ERROR: nextval: reached maximum value of sequence "seq_14" (32767) \c - - - :master_port SET citus.shard_replication_factor TO 1; SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? 
--------------------------------------------------------------------- - + 1 (1 row) SELECT undistribute_table('seq_test_12'); @@ -981,8 +909,92 @@ SELECT nextval('seq_13'); ERROR: nextval: reached maximum value of sequence "seq_13" (2147483647) SELECT nextval('seq_14'); ERROR: nextval: reached maximum value of sequence "seq_14" (32767) +\c - - - :master_port +-- Show that sequence and its dependency schema will be propagated if a distributed +-- table with default column is added +CREATE SCHEMA test_schema_for_sequence_default_propagation; +CREATE SEQUENCE test_schema_for_sequence_default_propagation.seq_10; +-- Both should return 0 rows +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +-- Create distributed table with default column to propagate dependencies +CREATE TABLE test_seq_dist(a int, x BIGINT DEFAULT nextval('test_schema_for_sequence_default_propagation.seq_10')); +SELECT create_distributed_table('test_seq_dist', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Both sequence and dependency schema should be distributed +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); + pg_identify_object_as_address +--------------------------------------------------------------------- + (sequence,"{test_schema_for_sequence_default_propagation,seq_10}",{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); + pg_identify_object_as_address +--------------------------------------------------------------------- + (schema,{test_schema_for_sequence_default_propagation},{}) +(1 row) + +-- Show that sequence can stay on the worker node if the transaction is +-- rolled back after distributing the table +BEGIN; +CREATE SEQUENCE sequence_rollback; +CREATE TABLE sequence_rollback_table(id int, val_1 int default nextval('sequence_rollback')); +SELECT create_distributed_table('sequence_rollback_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ROLLBACK; +-- Show that there is a sequence on the worker with the sequence type int +\c - - - :worker_1_port +SELECT seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text = 'sequence_rollback'; + seqtypid | seqmax | seqmin +--------------------------------------------------------------------- + integer | 2147483647 | 1 +(1 row) + +\c - - - :master_port +-- Show that we can create a sequence with the same name and different data type +BEGIN; +CREATE SEQUENCE sequence_rollback; +CREATE TABLE sequence_rollback_table(id int, val_1 bigint default nextval('sequence_rollback')); +SELECT create_distributed_table('sequence_rollback_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1
row) + +ROLLBACK; +-- Show that existing sequence has been renamed and a new sequence with the same name +-- created for another type +\c - - - :worker_1_port +SELECT seqrelid::regclass, seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text like '%sequence_rollback%' ORDER BY 1,2; + seqrelid | seqtypid | seqmax | seqmin +--------------------------------------------------------------------- + "sequence_rollback(citus_backup_0)" | integer | 2147483647 | 1 + sequence_rollback | bigint | 562949953421313 | 281474976710657 +(2 rows) + \c - - - :master_port -- clean up +DROP SCHEMA test_schema_for_sequence_default_propagation CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to sequence test_schema_for_sequence_default_propagation.seq_10 +drop cascades to default value for column x of table test_seq_dist +DROP TABLE test_seq_dist; DROP TABLE sequence_default.seq_test_7_par; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA sequence_default CASCADE; diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index fc51e202e..2e412a4a8 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -262,11 +262,6 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; (1 row) \c - - - :worker_2_port -SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%'; - worker_drop_distributed_table ---------------------------------------------------------------------- -(0 rows) - SELECT count(*) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%'; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/non_super_user_object_metadata.out b/src/test/regress/expected/non_super_user_object_metadata.out index 67d3677d2..ebbf9510e 100644 --- a/src/test/regress/expected/non_super_user_object_metadata.out +++ b/src/test/regress/expected/non_super_user_object_metadata.out @@ -416,22 +416,34 @@ SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_ localhost | 57638 | t | (extension,{ltree},{}) (2 rows) -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -NOTICE: dropping metadata on the node (localhost,57637) - stop_metadata_sync_to_node +-- Show that dropping a distributed table drops the pg_dist_object entry on worker +CREATE TABLE extension_schema.table_to_check_object(id int); +SELECT create_distributed_table('extension_schema.table_to_check_object', 'id'); + create_distributed_table --------------------------------------------------------------------- (1 row) -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -NOTICE: dropping metadata on the node (localhost,57638) - stop_metadata_sync_to_node +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%'; + obj_identifier --------------------------------------------------------------------- - + (table,"{extension_schema,table_to_check_object}",{}) (1 row) --- Show that we don't have any object metadata after stopping syncing -SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from 
citus.pg_dist_object;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%';$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (table,"{extension_schema,table_to_check_object}",{}) + localhost | 57638 | t | (table,"{extension_schema,table_to_check_object}",{}) +(2 rows) + +DROP TABLE extension_schema.table_to_check_object; +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%'; + obj_identifier +--------------------------------------------------------------------- +(0 rows) + +SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%';$$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- localhost | 57637 | t | @@ -441,15 +453,3 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas -- Revert the settings for following tests RESET citus.enable_ddl_propagation; RESET citus.shard_replication_factor; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/resync_metadata_with_sequences.out b/src/test/regress/expected/resync_metadata_with_sequences.out index b977f0a8d..f96651a14 100644 --- a/src/test/regress/expected/resync_metadata_with_sequences.out +++ b/src/test/regress/expected/resync_metadata_with_sequences.out @@ -140,10 +140,10 @@ INSERT INTO sensors_news VALUES (DEFAULT, DEFAULT, '2021-01-01') RETURNING *; (1 row) \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) \c - - - :worker_1_port diff --git a/src/test/regress/expected/start_stop_metadata_sync.out b/src/test/regress/expected/start_stop_metadata_sync.out index 37bb73194..3cbbf1572 100644 --- a/src/test/regress/expected/start_stop_metadata_sync.out +++ b/src/test/regress/expected/start_stop_metadata_sync.out @@ -251,24 +251,6 @@ SELECT count(*) > 0 FROM pg_dist_node; f (1 row) -SELECT count(*) > 0 FROM pg_dist_shard; - ?column? ---------------------------------------------------------------------- - f -(1 row) - -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'distributed_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); - ?column? 
---------------------------------------------------------------------- - f -(1 row) - -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'reference_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); - ?column? ---------------------------------------------------------------------- - f -(1 row) - \c - - - :master_port SET search_path TO "start_stop_metadata_sync"; SELECT * FROM distributed_table_1; @@ -291,24 +273,6 @@ SELECT count(*) > 0 FROM pg_dist_node; t (1 row) -SELECT count(*) > 0 FROM pg_dist_shard; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'distributed_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'reference_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); - ?column? ---------------------------------------------------------------------- - t -(1 row) - \c - - - :master_port -- test synchronization for pg_dist_node flags SELECT citus_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', false); @@ -460,24 +424,6 @@ SELECT count(*) > 0 FROM pg_dist_node; f (1 row) -SELECT count(*) > 0 FROM pg_dist_shard; - ?column? ---------------------------------------------------------------------- - f -(1 row) - -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'distributed_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); - ?column? ---------------------------------------------------------------------- - f -(1 row) - -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'reference_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); - ?column? ---------------------------------------------------------------------- - f -(1 row) - \c - - - :master_port SET search_path TO "start_stop_metadata_sync"; -- start metadata sync sets the multi-shard modify mode to sequential @@ -522,9 +468,7 @@ BEGIN; (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -ERROR: cannot execute metadata syncing operation because there was a parallel operation on a distributed table in the transaction -DETAIL: When modifying metadata, Citus needs to perform all operations over a single connection per node to ensure consistency. 
-HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; -- this is safe because start_metadata_sync_to_node already switches to -- sequential execution @@ -561,9 +505,7 @@ BEGIN; (1 row) ROLLBACK; --- multi-shard commands are allowed with start_metadata_sync --- as long as the start_metadata_sync_to_node executed --- when it is OK to switch to sequential execution +-- multi-shard commands are not allowed with start_metadata_sync BEGIN; -- sync at the start of the tx SELECT start_metadata_sync_to_node('localhost', :worker_1_port); @@ -597,15 +539,9 @@ BEGIN; -- sync at the end of the tx SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - +ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; --- multi-shard commands are allowed with start_metadata_sync --- as long as the start_metadata_sync_to_node executed --- when it is OK to switch to sequential execution +-- multi-shard commands are not allowed with start_metadata_sync BEGIN; -- sync at the start of the tx SELECT start_metadata_sync_to_node('localhost', :worker_1_port); @@ -640,11 +576,7 @@ BEGIN; -- sync at the end of the tx SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - +ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; -- cleanup \c - - - :master_port diff --git a/src/test/regress/expected/turn_mx_on.out b/src/test/regress/expected/turn_mx_on.out index 7e1210bbd..f6e9c6856 100644 --- a/src/test/regress/expected/turn_mx_on.out +++ b/src/test/regress/expected/turn_mx_on.out @@ -5,6 +5,12 @@ SELECT pg_reload_conf(); t (1 row) +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + SET client_min_messages TO ERROR; SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; start_metadata_sync_to_node diff --git a/src/test/regress/expected/turn_mx_on_0.out b/src/test/regress/expected/turn_mx_on_0.out index 498a8ea68..ea67ce573 100644 --- a/src/test/regress/expected/turn_mx_on_0.out +++ b/src/test/regress/expected/turn_mx_on_0.out @@ -5,6 +5,12 @@ SELECT pg_reload_conf(); t (1 row) +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + SET client_min_messages TO ERROR; SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; start_metadata_sync_to_node diff --git a/src/test/regress/expected/turn_mx_on_1.out b/src/test/regress/expected/turn_mx_on_1.out index becca3589..21d1ecfd9 100644 --- a/src/test/regress/expected/turn_mx_on_1.out +++ b/src/test/regress/expected/turn_mx_on_1.out @@ -5,6 +5,12 @@ SELECT pg_reload_conf(); t (1 row) +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + SET client_min_messages TO ERROR; SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; start_metadata_sync_to_node diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out 
b/src/test/regress/expected/upgrade_list_citus_objects.out
index 7bb7e779d..86c121568 100644
--- a/src/test/regress/expected/upgrade_list_citus_objects.out
+++ b/src/test/regress/expected/upgrade_list_citus_objects.out
@@ -207,6 +207,7 @@ ORDER BY 1;
 function worker_create_truncate_trigger(regclass)
 function worker_drop_distributed_table(text)
 function worker_drop_sequence_dependency(text)
+ function worker_drop_shell_table(text)
 function worker_fetch_foreign_file(text,text,bigint,text[],integer[])
 function worker_fetch_partition_file(bigint,integer,integer,integer,text,integer)
 function worker_fix_partition_shard_index_names(regclass,text,text)
@@ -266,5 +267,5 @@ ORDER BY 1;
 view citus_worker_stat_activity
 view pg_dist_shard_placement
 view time_partitions
-(250 rows)
+(251 rows)

diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source
index e41f81779..659000306 100644
--- a/src/test/regress/input/multi_alter_table_statements.source
+++ b/src/test/regress/input/multi_alter_table_statements.source
@@ -637,3 +637,23 @@ CREATE TABLE referenced_table(i int UNIQUE);
 SELECT create_distributed_table('referenced_table', 'i');
 ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES referenced_table(i);
 DROP TABLE referenced_table, test_table_1;
+
+-- Check that a sequence propagates its own dependencies while adding a column
+CREATE TABLE table_without_sequence(a int);
+SELECT create_distributed_table('table_without_sequence', 'a');
+
+CREATE SCHEMA test_schema_for_sequence_propagation;
+CREATE SEQUENCE test_schema_for_sequence_propagation.seq_10;
+
+-- Both should have zero rows
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);
+
+ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_schema_for_sequence_propagation.seq_10');
+
+-- Should be distributed along with the sequence
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);
+
+DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
+DROP TABLE table_without_sequence;
diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source
index dfd094943..79075482a 100644
--- a/src/test/regress/input/multi_copy.source
+++ b/src/test/regress/input/multi_copy.source
@@ -497,7 +497,17 @@ SELECT shardid, nodename, nodeport
 WHERE logicalrelid = 'numbers_append'::regclass order by placementid;
 -- add the node back
-SET client_min_messages TO ERROR;
+-- before adding the node, add pg_dist_object entries for tables created with
+-- master_create_distributed_table as we don't have entries for them.
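
(A minimal sketch, assuming only the standard pg_dist_partition and citus.pg_dist_object catalogs: the tables that need such manual entries could be listed with the query below; the INSERTs that follow simply hard-code that list for this test.)

-- sketch: distributed tables that still lack a pg_dist_object entry
SELECT logicalrelid
FROM pg_dist_partition
WHERE logicalrelid NOT IN
    (SELECT objid FROM citus.pg_dist_object WHERE classid = 'pg_class'::regclass);
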
+
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'objects'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'customer_with_special_\\_character'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, '1_customer'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'packed_numbers_hash'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'super_packed_numbers_hash'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0);
+
 SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
 RESET client_min_messages;
 RESET citus.shard_replication_factor;
diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule
index bd57faa84..a2583034a 100644
--- a/src/test/regress/isolation_schedule
+++ b/src/test/regress/isolation_schedule
@@ -92,8 +92,8 @@ test: isolation_replicated_dist_on_mx
 # MXless tests
 test: isolation_check_mx
-test: isolation_turn_mx_off
 test: isolation_replicate_reference_tables_to_coordinator
+test: isolation_turn_mx_off
 test: isolation_reference_copy_vs_all
 test: isolation_ref2ref_foreign_keys
 test: isolation_multiuser_locking
diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source
index b84b81127..6c2b04289 100644
--- a/src/test/regress/output/multi_alter_table_statements.source
+++ b/src/test/regress/output/multi_alter_table_statements.source
@@ -1259,3 +1259,43 @@ ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES referenced_table(i);
 ERROR: cannot create foreign key constraint
 DETAIL: Foreign keys are supported in two cases, either in between two colocated tables including partition column in the same ordinal in the both tables or from distributed to reference tables
 DROP TABLE referenced_table, test_table_1;
+-- Check that a sequence propagates its own dependencies while adding a column
+CREATE TABLE table_without_sequence(a int);
+SELECT create_distributed_table('table_without_sequence', 'a');
+ create_distributed_table
+--------------------------
+
+(1 row)
+
+CREATE SCHEMA test_schema_for_sequence_propagation;
+CREATE SEQUENCE test_schema_for_sequence_propagation.seq_10;
+-- Both should have zero rows
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
+ pg_identify_object_as_address
+-------------------------------
+(0 rows)
+
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);
+ pg_identify_object_as_address
+-------------------------------
+(0 rows)
+
+ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_schema_for_sequence_propagation.seq_10');
+-- Should be distributed along with the sequence
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass);
+ pg_identify_object_as_address
+---------------------------------------------------------------
+ (sequence,"{test_schema_for_sequence_propagation,seq_10}",{})
+(1 row)
+
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace);
+ pg_identify_object_as_address
+----------------------------------------------------
+ (schema,{test_schema_for_sequence_propagation},{})
+(1 row)
+
+DROP SCHEMA test_schema_for_sequence_propagation CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to sequence test_schema_for_sequence_propagation.seq_10
+drop cascades to default value for column x of table table_without_sequence
+DROP TABLE table_without_sequence;
diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source
index fb2a33d20..0788cf3c9 100644
--- a/src/test/regress/output/multi_copy.source
+++ b/src/test/regress/output/multi_copy.source
@@ -625,7 +625,15 @@ SELECT shardid, nodename, nodeport
 (6 rows)

 -- add the node back
-SET client_min_messages TO ERROR;
+-- before adding the node, add pg_dist_object entries for tables created with
+-- master_create_distributed_table as we don't have entries for them.
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'objects'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'customer_with_special_\\_character'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, '1_customer'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'packed_numbers_hash'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'super_packed_numbers_hash'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0);
+INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0);
 SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
 ?column?
--------------------------------------------------------------------- diff --git a/src/test/regress/spec/isolation_distributed_transaction_id.spec b/src/test/regress/spec/isolation_distributed_transaction_id.spec index 7bf4c02c0..372cd8f78 100644 --- a/src/test/regress/spec/isolation_distributed_transaction_id.spec +++ b/src/test/regress/spec/isolation_distributed_transaction_id.spec @@ -64,6 +64,11 @@ step "s1-get-all-transactions" SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id() ORDER BY 1,2,3; } +step "s1-drop-table" +{ + DROP TABLE distributed_transaction_id_table; +} + session "s2" step "s2-begin" @@ -119,7 +124,7 @@ permutation "s1-begin" "s1-assign-transaction-id" "s1-get-all-transactions" "s2- // now show that distributed transaction id on the coordinator // is the same with the one on the worker -permutation "s1-create-table" "s1-begin" "s1-insert" "s1-verify-current-xact-is-on-worker" "s1-commit" +permutation "s1-create-table" "s1-begin" "s1-insert" "s1-verify-current-xact-is-on-worker" "s1-drop-table" "s1-commit" // we would initially forget the distributed transaction ID on pg_dist_partition invalidations permutation "s1-begin" "s1-assign-transaction-id" "s1-has-transaction-number" "s2-vacuum" "s1-has-transaction-number" "s1-commit" diff --git a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec index a3b73b9bb..64b376395 100644 --- a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec +++ b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec @@ -31,16 +31,6 @@ teardown session "s1" -step "increase-retry-interval" -{ - ALTER SYSTEM SET citus.metadata_sync_retry_interval TO 20000; -} - -step "reset-retry-interval" -{ - ALTER SYSTEM RESET citus.metadata_sync_retry_interval; -} - step "enable-deadlock-detection" { ALTER SYSTEM SET citus.distributed_deadlock_detection_factor TO 3; @@ -76,17 +66,6 @@ step "s1-commit" COMMIT; } -step "s1-count-daemons" -{ - SELECT count(*) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; -} - -step "s1-cancel-metadata-sync" -{ - SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; - SELECT pg_sleep(2); -} - session "s2" step "s2-start-session-level-connection" @@ -147,7 +126,3 @@ step "s3-wait" // themselves involved in a distributed deadlock. // See https://github.com/citusdata/citus/issues/4393 for more details. permutation "enable-deadlock-detection" "reload-conf" "s2-start-session-level-connection" "s1-begin" "s1-update-1" "s2-begin-on-worker" "s2-update-2-on-worker" "s2-truncate-on-worker" "s3-invalidate-metadata" "s3-resync" "s3-wait" "s2-update-1-on-worker" "s1-update-2" "s1-commit" "s2-commit-on-worker" "disable-deadlock-detection" "reload-conf" "s2-stop-connection" - -// Test that when metadata sync is waiting for locks, cancelling it terminates it. -// This is important in cases where the metadata sync daemon itself is involved in a deadlock. 
-permutation "increase-retry-interval" "reload-conf" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-truncate-on-worker" "s3-invalidate-metadata" "s3-resync" "s3-wait" "s1-count-daemons" "s1-cancel-metadata-sync" "s1-count-daemons" "reset-retry-interval" "reload-conf" "s2-commit-on-worker" "s2-stop-connection" "s3-resync" "s3-wait" diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index b2dd5dad3..752f26399 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -3,7 +3,7 @@ setup SELECT citus_internal.replace_isolation_tester_func(); SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SELECT master_add_node('localhost', 57636); + SELECT master_add_node('localhost', 57636, groupid => 0); CREATE TABLE ref_table(a int primary key); SELECT create_reference_table('ref_table'); diff --git a/src/test/regress/spec/isolation_update_node.spec b/src/test/regress/spec/isolation_update_node.spec index 32b9511e6..a6e108528 100644 --- a/src/test/regress/spec/isolation_update_node.spec +++ b/src/test/regress/spec/isolation_update_node.spec @@ -135,6 +135,8 @@ permutation "s1-begin" "s1-update-node-1" "s2-begin" "s2-update-node-1" "s1-comm // master_update_node should block start_metadata_sync_to_node. Note that we // cannot run start_metadata_sync_to_node in a transaction, so we're not // testing the reverse order here. +// Having different result on coordinator and worker is expected for now since +// we run test after disabling mx. permutation "s1-begin" "s1-update-node-1" "s2-start-metadata-sync-node-2" "s1-commit" "s2-verify-metadata" // make sure we have entries in prepared statement cache diff --git a/src/test/regress/sql/citus_local_tables_mx.sql b/src/test/regress/sql/citus_local_tables_mx.sql index 8c14bd621..bc6f84feb 100644 --- a/src/test/regress/sql/citus_local_tables_mx.sql +++ b/src/test/regress/sql/citus_local_tables_mx.sql @@ -33,9 +33,9 @@ CREATE TRIGGER dummy_function_trigger BEFORE UPDATE OF value ON citus_local_table FOR EACH ROW EXECUTE FUNCTION dummy_function(); --- Show that we can sync metadata successfully. That means, we create +-- Show that we can activate node successfully. That means, we create -- the function that trigger needs in mx workers too. 
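
(A minimal sketch of how that propagation could be spot-checked, assuming run_command_on_workers is available and dummy_function is the trigger function defined earlier in this test; each worker should report a count of 1 after activation.)

SELECT run_command_on_workers($$
    SELECT count(*) FROM pg_proc WHERE proname = 'dummy_function'
$$);
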
-SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); CREATE EXTENSION seg; ALTER TRIGGER dummy_function_trigger ON citus_local_table DEPENDS ON EXTENSION seg; diff --git a/src/test/regress/sql/distributed_functions.sql b/src/test/regress/sql/distributed_functions.sql index 1b132b8b4..ce39d50c6 100644 --- a/src/test/regress/sql/distributed_functions.sql +++ b/src/test/regress/sql/distributed_functions.sql @@ -19,15 +19,6 @@ CREATE TABLE notices ( SELECT create_distributed_table('notices', 'id'); INSERT INTO notices VALUES (1, 'hello world'); --- Create the necessary test utility function -CREATE OR REPLACE FUNCTION master_metadata_snapshot() - RETURNS text[] - LANGUAGE C STRICT - AS 'citus'; - -COMMENT ON FUNCTION master_metadata_snapshot() - IS 'commands to create the metadata snapshot'; - CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$ @@ -358,9 +349,6 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_proc WHERE proname='eq_w -- valid distribution with distribution_arg_name -- case insensitive SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', distribution_arg_name:='VaL1'); --- show that we are able to propagate objects with multiple item on address arrays -SELECT * FROM (SELECT unnest(master_metadata_snapshot()) as metadata_command order by 1) as innerResult WHERE metadata_command like '%distributed_object_data%'; - -- valid distribution with distribution_arg_index SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)','$1'); diff --git a/src/test/regress/sql/failure_mx_metadata_sync.sql b/src/test/regress/sql/failure_mx_metadata_sync.sql index 171d49a1a..5dfe88585 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync.sql @@ -20,20 +20,20 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Failure to set groupid in the worker SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").cancel(' || :pid || ')'); -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to drop all tables in pg_dist_partition -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()'); -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_partition").kill()'); +SELECT citus_activate_node('localhost', :worker_2_proxy_port); --- Failure to truncate pg_dist_node in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); +-- Failure to delete pg_dist_node entries from the worker +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); SELECT 
start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -- Failure to populate pg_dist_node in the worker @@ -45,9 +45,9 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); -- Verify that coordinator knows worker does not have valid metadata SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; --- Verify we can sync metadata after unsuccessful attempts +-- Verify we can activate node after unsuccessful attempts SELECT citus.mitmproxy('conn.allow()'); -SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_proxy_port); SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Check failures on DDL command propagation @@ -70,16 +70,10 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE pg_dist_local_group SET groupid").kill()'); SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); --- Failure to drop all tables in pg_dist_partition -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").cancel(' || :pid || ')'); +-- Failure to delete pg_dist_node entries from the worker +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").cancel(' || :pid || ')'); SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_drop_distributed_table").kill()'); -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); - --- Failure to truncate pg_dist_node in the worker -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").cancel(' || :pid || ')'); -SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE pg_dist_node CASCADE").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM pg_dist_node").kill()'); SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); \c - - - :worker_2_port @@ -87,7 +81,7 @@ SELECT count(*) FROM pg_dist_node; \c - - - :master_port SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; --- Verify we can drop metadata after unsuccessful attempts +-- Verify we can stop metadata sync after unsuccessful attempts SELECT citus.mitmproxy('conn.allow()'); SELECT stop_metadata_sync_to_node('localhost', :worker_2_proxy_port); diff --git a/src/test/regress/sql/local_shard_execution_dropped_column.sql b/src/test/regress/sql/local_shard_execution_dropped_column.sql index 36effbf71..1d7dac0b7 100644 --- a/src/test/regress/sql/local_shard_execution_dropped_column.sql +++ b/src/test/regress/sql/local_shard_execution_dropped_column.sql @@ -15,8 +15,9 @@ SET citus.shard_replication_factor TO 1; CREATE TABLE t1 (a int, b int, c int UNIQUE); SELECT create_distributed_table('t1', 'c'); ALTER TABLE t1 DROP COLUMN b; -SELECT start_metadata_sync_to_node('localhost',:worker_1_port); -SELECT start_metadata_sync_to_node('localhost',:worker_2_port); + +SELECT 1 FROM citus_activate_node('localhost',:worker_1_port); +SELECT 1 FROM citus_activate_node('localhost',:worker_2_port); \c - - - :worker_1_port SET search_path TO local_shard_execution_dropped_column; diff --git 
a/src/test/regress/sql/master_copy_shard_placement.sql b/src/test/regress/sql/master_copy_shard_placement.sql index f4b70fdb2..6448cb63b 100644 --- a/src/test/regress/sql/master_copy_shard_placement.sql +++ b/src/test/regress/sql/master_copy_shard_placement.sql @@ -93,6 +93,8 @@ SELECT count(*) FROM history; SET citus.shard_replication_factor TO 1; -- metadata sync will succeed even if we have rep > 1 tables +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); CREATE TABLE mx_table(a int); diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index df0a8e389..f5a67e148 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -1,7 +1,6 @@ SET citus.next_shard_id TO 1220000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1; -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- Tests functions related to cluster membership @@ -49,7 +48,7 @@ SELECT * FROM rebalance_table_shards(); TRUNCATE pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000; -SELECT * FROM citus_activate_node('localhost', :worker_2_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash'); @@ -85,13 +84,11 @@ SELECT master_activate_node('localhost', :worker_2_port); DROP TABLE test_reference_table, cluster_management_test; -- create users like this so results of community and enterprise are same -SET citus.enable_object_propagation TO ON; SET client_min_messages TO ERROR; CREATE USER non_super_user; CREATE USER node_metadata_user; SELECT 1 FROM run_command_on_workers('CREATE USER node_metadata_user'); RESET client_min_messages; -SET citus.enable_object_propagation TO OFF; GRANT EXECUTE ON FUNCTION master_activate_node(text,int) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_inactive_node(text,int,int,noderole,name) TO node_metadata_user; GRANT EXECUTE ON FUNCTION master_add_node(text,int,int,noderole,name) TO node_metadata_user; @@ -125,7 +122,6 @@ SELECT master_update_node(nodeid, 'localhost', :worker_2_port + 3) FROM pg_dist_ -- try to manipulate node metadata via privileged user SET ROLE node_metadata_user; -SET citus.enable_object_propagation TO off; -- prevent master activate node to actually connect for this test SELECT 1 FROM master_add_node('localhost', :worker_2_port); BEGIN; SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); @@ -137,7 +133,6 @@ ABORT; \c - postgres - :master_port SET citus.next_shard_id TO 1220000; -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; SELECT master_get_active_worker_nodes(); @@ -222,7 +217,6 @@ SELECT master_remove_node('localhost', :worker_2_port); \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :master_port -SET 
citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- check that added nodes are not propagated to nodes without metadata SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); @@ -230,7 +224,6 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup -- check that removing two nodes in the same transaction works SELECT @@ -266,8 +259,6 @@ SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodep SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup - SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); @@ -303,7 +294,6 @@ DELETE FROM pg_dist_shard; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_node; \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); diff --git a/src/test/regress/sql/multi_colocation_utils.sql b/src/test/regress/sql/multi_colocation_utils.sql index 33324384f..6729b1cbf 100644 --- a/src/test/regress/sql/multi_colocation_utils.sql +++ b/src/test/regress/sql/multi_colocation_utils.sql @@ -2,6 +2,9 @@ SET citus.next_shard_id TO 1300000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4; +-- Delete orphaned entries from pg_dist_colocation +DELETE FROM pg_dist_colocation where colocationid = 5 or colocationid = 6; + -- =================================================================== -- create test utility function -- =================================================================== @@ -95,6 +98,7 @@ SELECT master_create_worker_shards('table5_groupX', 4, 2); CREATE TABLE table6_append ( id int ); SELECT master_create_distributed_table('table6_append', 'id', 'append'); SELECT master_create_empty_shard('table6_append'); + SELECT master_create_empty_shard('table6_append'); @@ -105,7 +109,6 @@ SELECT colocation_test_colocate_tables('table1_group1', 'table2_group1'); SELECT get_table_colocation_id('table1_group1'); SELECT get_table_colocation_id('table5_groupX'); SELECT get_table_colocation_id('table6_append'); - -- check self table co-location SELECT tables_colocated('table1_group1', 'table1_group1'); SELECT tables_colocated('table5_groupX', 'table5_groupX'); @@ -120,7 +123,6 @@ SELECT tables_colocated('table1_group1', 'table3_group2'); -- check table co-location with invalid co-location group SELECT tables_colocated('table1_group1', 'table5_groupX'); SELECT tables_colocated('table1_group1', 'table6_append'); - -- check self shard co-location SELECT shards_colocated(1300000, 1300000); SELECT shards_colocated(1300016, 1300016); @@ -155,7 +157,6 @@ SELECT find_shard_interval_index(1300001); SELECT find_shard_interval_index(1300002); SELECT find_shard_interval_index(1300003); SELECT find_shard_interval_index(1300016); - -- check external colocation API SELECT count(*) FROM pg_dist_partition WHERE colocationid IN (4, 5); @@ -409,11 +410,14 @@ SELECT 
update_distributed_table_colocation('table1_group_none', colocate_with => SELECT update_distributed_table_colocation('table1_group_none', colocate_with => 'table2_groupE'); SELECT update_distributed_table_colocation('table1_group_none', colocate_with => 'table3_groupE'); --- sync metadata to get rid of inconsistencies in pg_dist tables -select stop_metadata_sync_to_node('localhost', :worker_1_port); -select stop_metadata_sync_to_node('localhost', :worker_2_port); -select start_metadata_sync_to_node('localhost', :worker_1_port); -select start_metadata_sync_to_node('localhost', :worker_2_port); +-- activate nodes to get rid of inconsistencies in pg_dist tables +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table1_group1'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table2_group1'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table3_group2'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table4_group2'::regclass::oid, 0); +INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table5_groupX'::regclass::oid, 0); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); -- move a table with a colocation id which is already not in pg_dist_colocation SELECT update_distributed_table_colocation('table1_group_none', colocate_with => 'table2_group_none'); diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index a5150d2c2..833fa35bd 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -212,9 +212,11 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '9.5-1'; BEGIN; + SET citus.enable_object_propagation TO on; SELECT master_add_node('localhost', :master_port, groupId=>0); CREATE TABLE citus_local_table (a int); SELECT create_citus_local_table('citus_local_table'); + RESET citus.enable_object_propagation; -- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table ALTER EXTENSION citus UPDATE TO '9.4-1'; @@ -660,7 +662,6 @@ CREATE EXTENSION citus; CREATE EXTENSION citus; \c - - - :master_port -SET citus.enable_object_propagation TO off; -- prevent distributed transactions during add node SELECT FROM master_add_node('localhost', :worker_1_port); \c - - - :worker_1_port diff --git a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql index 6bfd32ed0..8604480a0 100644 --- a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql +++ b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql @@ -54,7 +54,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'fix_idx_names' A \c - - - :master_port -- this should work properly -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); \c - - - :worker_1_port -- we have no clashes diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql index f55bc9c3f..d85bede5f 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -20,20 +20,27 @@ SELECT 
nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function -CREATE FUNCTION master_metadata_snapshot() +CREATE FUNCTION activate_node_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; -COMMENT ON FUNCTION master_metadata_snapshot() - IS 'commands to create the metadata snapshot'; +COMMENT ON FUNCTION activate_node_snapshot() + IS 'commands to activate node snapshot'; -- Show that none of the existing tables are qualified to be MX tables SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; --- Show that, with no MX tables, metadata snapshot contains only the delete commands, --- pg_dist_node entries and reference tables -SELECT unnest(master_metadata_snapshot()) order by 1; +-- Since password_encryption default has been changed to sha from md5 with PG14 +-- we are updating it manually just for consistent test results between PG versions. +ALTER SYSTEM SET password_encryption TO md5; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); +ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword'; + +-- Show that, with no MX tables, activate node snapshot contains only the delete commands, +-- pg_dist_node entries, pg_dist_object entries and roles. +SELECT unnest(activate_node_snapshot()) order by 1; -- this function is dropped in Citus10, added here for tests CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, @@ -47,52 +54,48 @@ COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regcla distribution_method citus.distribution_type) IS 'define the table distribution functions'; --- this function is dropped in Citus10, added here for tests -CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer, - replication_factor integer DEFAULT 2) - RETURNS void - AS 'citus', $$master_create_worker_shards$$ - LANGUAGE C STRICT; - -- Create a test table with constraints and SERIAL and default from user defined sequence CREATE SEQUENCE user_defined_seq; CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq')); -SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash'); -SELECT master_create_worker_shards('mx_test_table', 8, 1); +set citus.shard_count to 8; +set citus.shard_replication_factor to 1; +SELECT create_distributed_table('mx_test_table', 'col_1'); +reset citus.shard_count; +reset citus.shard_replication_factor; -- Set the replication model of the test table to streaming replication so that it is -- considered as an MX table UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; --- Show that the created MX table is included in the metadata snapshot -SELECT unnest(master_metadata_snapshot()) order by 1; +-- Show that the created MX table is and its sequences are included in the activate node snapshot +SELECT unnest(activate_node_snapshot()) order by 1; --- Show that CREATE INDEX commands are included in the metadata snapshot +-- Show that CREATE INDEX commands are included in the activate node snapshot CREATE INDEX mx_index ON mx_test_table(col_2); -SELECT unnest(master_metadata_snapshot()) order by 1; +SELECT unnest(activate_node_snapshot()) order by 1; --- Show that schema changes are included in the metadata snapshot +-- Show that schema changes are included in the activate node snapshot CREATE SCHEMA mx_testing_schema; ALTER 
TABLE mx_test_table SET SCHEMA mx_testing_schema; -SELECT unnest(master_metadata_snapshot()) order by 1; +SELECT unnest(activate_node_snapshot()) order by 1; --- Show that append distributed tables are not included in the metadata snapshot +-- Show that append distributed tables are not included in the activate node snapshot CREATE TABLE non_mx_test_table (col_1 int, col_2 text); SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(master_metadata_snapshot()) order by 1; +SELECT unnest(activate_node_snapshot()) order by 1; --- Show that range distributed tables are not included in the metadata snapshot +-- Show that range distributed tables are not included in the activate node snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; -SELECT unnest(master_metadata_snapshot()) order by 1; +SELECT unnest(activate_node_snapshot()) order by 1; --- Test start_metadata_sync_to_node UDF +-- Test start_metadata_sync_to_node and citus_activate_node UDFs -- Ensure that hasmetadata=false for all nodes SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; --- Ensure it works when run on a secondary node +-- Show that metadata can not be synced on secondary node SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); SELECT start_metadata_sync_to_node('localhost', 8888); @@ -103,8 +106,10 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; -- Add a node to another cluster to make sure it's also synced SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); --- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +\c - - - :master_port +-- Run start_metadata_sync_to_node and citus_activate_node and check that it marked hasmetadata for that worker +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; -- Check that the metadata has been copied to the worker @@ -126,7 +131,7 @@ SELECT * FROM pg_dist_colocation ORDER BY colocationid; -- Make sure that truncate trigger has been set for the MX table on worker SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; --- Make sure that start_metadata_sync_to_node considers foreign key constraints +-- Make sure that citus_activate_node considers foreign key constraints \c - - - :master_port -- Since we're superuser, we can set the replication model to 'streaming' to @@ -142,7 +147,7 @@ CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text, SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1'); SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); -- Check that foreign key metadata exists on the worker \c - - - :worker_1_port @@ -154,10 +159,10 @@ DROP TABLE mx_testing_schema.fk_test_1; RESET citus.shard_replication_factor; --- Check that repeated calls to start_metadata_sync_to_node has no side effects +-- Check that repeated calls to citus_activate_node has no 
side effects \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; SELECT * FROM pg_dist_node ORDER BY nodeid; @@ -171,10 +176,10 @@ SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'mx_testing_schema.mx_index'::regclass; SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; --- Make sure that start_metadata_sync_to_node can be called inside a transaction and rollbacked +-- Make sure that citus_activate_node can be called inside a transaction and rollbacked \c - - - :master_port BEGIN; -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); ROLLBACK; SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; @@ -182,7 +187,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; -- Check that the distributed table can be queried from the worker \c - - - :master_port SET citus.shard_replication_factor TO 1; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); CREATE TABLE mx_query_test (a int, b text, c int); SELECT create_distributed_table('mx_query_test', 'a'); @@ -441,7 +446,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -- sync table with serial column after create_distributed_table CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL, c SMALLSERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); DROP TABLE mx_table_with_small_sequence; -- Show that create_distributed_table works with a serial column @@ -486,7 +491,7 @@ SELECT nextval('mx_table_with_sequence_c_seq'); -- Check that adding a new metadata node sets the sequence space correctly \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); \c - - - :worker_2_port SELECT groupid FROM pg_dist_local_group; @@ -531,7 +536,7 @@ DROP TABLE mx_table_with_small_sequence, mx_table_with_sequence; \c - - - :master_port -- Remove a node so that shards and sequences won't be created on table creation. 
Therefore, --- we can test that start_metadata_sync_to_node can actually create the sequence with proper +-- we can test that citus_activate_node can actually create the sequence with proper -- owner CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement; CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition; @@ -557,7 +562,6 @@ SELECT create_distributed_table('mx_table', 'a'); \c - postgres - :master_port SELECT master_add_node('localhost', :worker_2_port); -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); \c - mx_user - :worker_1_port SELECT nextval('mx_table_b_seq'); @@ -771,7 +775,6 @@ SELECT pg_reload_conf(); UPDATE pg_dist_node SET metadatasynced=true WHERE nodeport=:worker_1_port; SELECT master_add_node('localhost', :worker_2_port); -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); CREATE SEQUENCE mx_test_sequence_0; CREATE SEQUENCE mx_test_sequence_1; @@ -788,7 +791,7 @@ ALTER TABLE test_table ADD COLUMN id2 int DEFAULT nextval('mx_test_sequence_1'); ALTER TABLE test_table ALTER COLUMN id2 DROP DEFAULT; ALTER TABLE test_table ALTER COLUMN id2 SET DEFAULT nextval('mx_test_sequence_1'); -SELECT unnest(master_metadata_snapshot()) order by 1; +SELECT unnest(activate_node_snapshot()) order by 1; -- shouldn't work since test_table is MX ALTER TABLE test_table ADD COLUMN id3 bigserial; @@ -852,11 +855,15 @@ SELECT run_command_on_workers('GRANT USAGE ON SCHEMA mx_test_schema_2 TO non_sup SET ROLE non_super_metadata_user; +-- user must be super user stop/start metadata SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); RESET ROLE; +SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + RESET citus.shard_count; RESET citus.shard_replication_factor; @@ -865,6 +872,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; --- Turn metadata sync back on at the end -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +-- Activate them at the end +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); diff --git a/src/test/regress/sql/multi_mx_add_coordinator.sql b/src/test/regress/sql/multi_mx_add_coordinator.sql index c318e78aa..338fbc651 100644 --- a/src/test/regress/sql/multi_mx_add_coordinator.sql +++ b/src/test/regress/sql/multi_mx_add_coordinator.sql @@ -31,8 +31,9 @@ SET citus.next_shard_id TO 7000000; SET citus.next_placement_id TO 7000000; SET client_min_messages TO WARNING; --- test that coordinator pg_dist_node entry is synced to the workers -SELECT wait_until_metadata_sync(30000); +-- test that metadata is synced to the workers +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); @@ -174,8 +175,10 @@ SET client_min_messages TO ERROR; DROP TABLE distributed_table, referece_table, local_fkey_table; SELECT master_remove_node('localhost', :master_port); --- test that coordinator pg_dist_node entry was removed from the workers -SELECT 
wait_until_metadata_sync(30000); +-- test that metadata is synced to the workers +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_2_port); + SELECT verify_metadata('localhost', :worker_1_port), verify_metadata('localhost', :worker_2_port); diff --git a/src/test/regress/sql/multi_partitioning.sql b/src/test/regress/sql/multi_partitioning.sql index d95413dc9..91bbc180b 100644 --- a/src/test/regress/sql/multi_partitioning.sql +++ b/src/test/regress/sql/multi_partitioning.sql @@ -1992,7 +1992,7 @@ SELECT tablename, indexname FROM pg_indexes WHERE schemaname = 'partitioning_schema' AND tablename ilike '%part_table_with_%' ORDER BY 1, 2; -- should work properly - no names clashes -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); \c - - - :worker_1_port -- check that indexes are named properly diff --git a/src/test/regress/sql/multi_sequence_default.sql b/src/test/regress/sql/multi_sequence_default.sql index 67663accb..c8d09738f 100644 --- a/src/test/regress/sql/multi_sequence_default.sql +++ b/src/test/regress/sql/multi_sequence_default.sql @@ -51,46 +51,6 @@ SELECT * FROM seq_test_0_local_table ORDER BY 1, 2 LIMIT 5; ALTER SEQUENCE seq_0 AS bigint; ALTER SEQUENCE seq_0_local_table AS bigint; --- we can change other things like increment --- if metadata is not synced to workers -BEGIN; -SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -CREATE SEQUENCE seq_13; -CREATE SEQUENCE seq_13_local_table; -CREATE TABLE seq_test_13 (x int, y int); -CREATE TABLE seq_test_13_local_table (x int, y int); -SELECT create_distributed_table('seq_test_13','x'); -SELECT citus_add_local_table_to_metadata('seq_test_13_local_table'); -ALTER TABLE seq_test_13 ADD COLUMN z int DEFAULT nextval('seq_13'); -ALTER TABLE seq_test_13_local_table ADD COLUMN z int DEFAULT nextval('seq_13_local_table'); - -ALTER SEQUENCE seq_13 INCREMENT BY 2; -ALTER SEQUENCE seq_13_local_table INCREMENT BY 2; -\d seq_13 -\d seq_13_local_table - - --- check that we can add serial pseudo-type columns --- when metadata is not synced to workers -TRUNCATE seq_test_0; -ALTER TABLE seq_test_0 ADD COLUMN w00 smallserial; -ALTER TABLE seq_test_0 ADD COLUMN w01 serial2; -ALTER TABLE seq_test_0 ADD COLUMN w10 serial; -ALTER TABLE seq_test_0 ADD COLUMN w11 serial4; -ALTER TABLE seq_test_0 ADD COLUMN w20 bigserial; -ALTER TABLE seq_test_0 ADD COLUMN w21 serial8; - -TRUNCATE seq_test_0_local_table; -ALTER TABLE seq_test_0_local_table ADD COLUMN w00 smallserial; -ALTER TABLE seq_test_0_local_table ADD COLUMN w01 serial2; -ALTER TABLE seq_test_0_local_table ADD COLUMN w10 serial; -ALTER TABLE seq_test_0_local_table ADD COLUMN w11 serial4; -ALTER TABLE seq_test_0_local_table ADD COLUMN w20 bigserial; -ALTER TABLE seq_test_0_local_table ADD COLUMN w21 serial8; - -ROLLBACK; - -- check alter column type precaution ALTER TABLE seq_test_0 ALTER COLUMN z TYPE bigint; ALTER TABLE seq_test_0 ALTER COLUMN z TYPE smallint; @@ -106,7 +66,6 @@ CREATE TABLE seq_test_4 (x int, y int); SELECT create_distributed_table('seq_test_4','x'); CREATE SEQUENCE seq_4; ALTER TABLE seq_test_4 ADD COLUMN a bigint DEFAULT nextval('seq_4'); -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP SEQUENCE seq_4 CASCADE; TRUNCATE seq_test_4; CREATE SEQUENCE seq_4; @@ -381,7 +340,7 @@ SELECT create_reference_table('seq_test_10'); INSERT INTO 
seq_test_10 VALUES (0); CREATE TABLE seq_test_11 (col0 int, col1 bigint DEFAULT nextval('seq_11'::text)); -- works but doesn't create seq_11 in the workers -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); -- works because there is no dependency created between seq_11 and seq_test_10 SELECT create_distributed_table('seq_test_11', 'col1'); -- insertion from workers fails @@ -403,7 +362,7 @@ CREATE TABLE seq_test_12(col0 text, col1 smallint DEFAULT nextval('seq_12'), col2 int DEFAULT nextval('seq_13'), col3 bigint DEFAULT nextval('seq_14')); SELECT create_distributed_table('seq_test_12', 'col0'); -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); INSERT INTO seq_test_12 VALUES ('hello0') RETURNING *; \c - - - :worker_1_port @@ -458,7 +417,7 @@ SELECT nextval('seq_14'); \c - - - :master_port SET citus.shard_replication_factor TO 1; SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); SELECT undistribute_table('seq_test_12'); SELECT create_distributed_table('seq_test_12', 'col0'); INSERT INTO seq_test_12 VALUES ('hello2') RETURNING *; @@ -478,7 +437,53 @@ SELECT nextval('seq_14'); \c - - - :master_port +-- Show that sequence and its dependency schema will be propagated if a distributed +-- table with default column is added +CREATE SCHEMA test_schema_for_sequence_default_propagation; +CREATE SEQUENCE test_schema_for_sequence_default_propagation.seq_10; + +-- Both should return 0 rows +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); + +-- Create distributed table with default column to propagate dependencies +CREATE TABLE test_seq_dist(a int, x BIGINT DEFAULT nextval('test_schema_for_sequence_default_propagation.seq_10')); +SELECT create_distributed_table('test_seq_dist', 'a'); + +-- Both sequence and dependency schema should be distributed +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); + +-- Show that sequence can stay on the worker node if the transaction is +-- rollbacked after distributing the table +BEGIN; +CREATE SEQUENCE sequence_rollback; +CREATE TABLE sequence_rollback_table(id int, val_1 int default nextval('sequence_rollback')); +SELECT create_distributed_table('sequence_rollback_table', 'id'); +ROLLBACK; + +-- Show that there is a sequence on the worker with the sequence type int +\c - - - :worker_1_port +SELECT seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text = 'sequence_rollback'; + +\c - - - :master_port +-- Show that we can create a sequence with the same name and different data type +BEGIN; +CREATE SEQUENCE sequence_rollback; +CREATE TABLE sequence_rollback_table(id int, val_1 bigint default nextval('sequence_rollback')); +SELECT create_distributed_table('sequence_rollback_table', 'id'); 
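
(A minimal sketch of the worker-side mechanism, assuming worker_create_or_replace_object keeps its rename-on-conflict behavior: when this second create_distributed_table propagates the bigint sequence, the integer sequence left over from the first rolled-back attempt is renamed to "sequence_rollback(citus_backup_0)" rather than dropped, roughly as if the worker ran the statement below.)

SELECT worker_create_or_replace_object('CREATE SEQUENCE sequence_rollback AS bigint');
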
+ROLLBACK;
+
+-- Show that the existing sequence has been renamed and a new sequence with the
+-- same name has been created for the new type
+\c - - - :worker_1_port
+SELECT seqrelid::regclass, seqtypid::regtype, seqmax, seqmin FROM pg_sequence WHERE seqrelid::regclass::text like '%sequence_rollback%' ORDER BY 1,2;
+
+\c - - - :master_port
+
 -- clean up
+DROP SCHEMA test_schema_for_sequence_default_propagation CASCADE;
+DROP TABLE test_seq_dist;
 DROP TABLE sequence_default.seq_test_7_par;
 SET client_min_messages TO error; -- suppress cascading objects dropping
 DROP SCHEMA sequence_default CASCADE;
diff --git a/src/test/regress/sql/multi_unsupported_worker_operations.sql b/src/test/regress/sql/multi_unsupported_worker_operations.sql
index b9f4ee337..02e5397cc 100644
--- a/src/test/regress/sql/multi_unsupported_worker_operations.sql
+++ b/src/test/regress/sql/multi_unsupported_worker_operations.sql
@@ -164,7 +164,6 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
 SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 \c - - - :worker_2_port
-SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%';
 SELECT count(*) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx\_%table%';
 SELECT count(*) FROM pg_dist_node;
 \c - - - :worker_1_port
diff --git a/src/test/regress/sql/non_super_user_object_metadata.sql b/src/test/regress/sql/non_super_user_object_metadata.sql
index 191225eec..67e9687a3 100644
--- a/src/test/regress/sql/non_super_user_object_metadata.sql
+++ b/src/test/regress/sql/non_super_user_object_metadata.sql
@@ -177,14 +177,18 @@ SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_
 SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{ltree}%';
 SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{ltree}%';$$) ORDER BY 1,2;
-SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
+-- Show that dropping a distributed table drops the pg_dist_object entry on the worker
+CREATE TABLE extension_schema.table_to_check_object(id int);
+SELECT create_distributed_table('extension_schema.table_to_check_object', 'id');
--- Show that we don't have any object metadata after stopping syncing
-SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object;$$) ORDER BY 1,2;
+SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%';
+SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%';$$) ORDER BY 1,2;
+
+DROP TABLE extension_schema.table_to_check_object;
+
+SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%';
+SELECT * FROM
run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%table_to_check_object%';$$) ORDER BY 1,2; -- Revert the settings for following tests RESET citus.enable_ddl_propagation; RESET citus.shard_replication_factor; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -SELECT start_metadata_sync_to_node('localhost', :worker_2_port); diff --git a/src/test/regress/sql/resync_metadata_with_sequences.sql b/src/test/regress/sql/resync_metadata_with_sequences.sql index b83caff7f..e0b263f52 100644 --- a/src/test/regress/sql/resync_metadata_with_sequences.sql +++ b/src/test/regress/sql/resync_metadata_with_sequences.sql @@ -55,7 +55,7 @@ INSERT INTO sensors VALUES (DEFAULT, DEFAULT, '2010-01-01') RETURNING *; INSERT INTO sensors_news VALUES (DEFAULT, DEFAULT, '2021-01-01') RETURNING *; \c - - - :master_port -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); \c - - - :worker_1_port diff --git a/src/test/regress/sql/start_stop_metadata_sync.sql b/src/test/regress/sql/start_stop_metadata_sync.sql index a4c152631..a23eba415 100644 --- a/src/test/regress/sql/start_stop_metadata_sync.sql +++ b/src/test/regress/sql/start_stop_metadata_sync.sql @@ -131,9 +131,6 @@ SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'reference_table__' AND rel SET search_path TO "start_stop_metadata_sync"; SELECT count(*) > 0 FROM pg_dist_node; -SELECT count(*) > 0 FROM pg_dist_shard; -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'distributed_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'reference_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); \c - - - :master_port SET search_path TO "start_stop_metadata_sync"; @@ -144,9 +141,6 @@ COMMIT; \c - - - :worker_1_port SELECT count(*) > 0 FROM pg_dist_node; -SELECT count(*) > 0 FROM pg_dist_shard; -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'distributed_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'reference_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); \c - - - :master_port -- test synchronization for pg_dist_node flags @@ -209,9 +203,6 @@ COMMIT; \c - - - :worker_1_port SELECT count(*) > 0 FROM pg_dist_node; -SELECT count(*) > 0 FROM pg_dist_shard; -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'distributed_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); -SELECT count(*) > 0 FROM pg_class WHERE relname LIKE 'reference_table__' AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = 'start_stop_metadata_sync'); \c - - - :master_port SET search_path TO "start_stop_metadata_sync"; @@ -253,9 +244,7 @@ BEGIN; SELECT create_distributed_table('test_table_rep', 'a'); ROLLBACK; --- multi-shard commands are allowed with start_metadata_sync --- as long as the start_metadata_sync_to_node executed --- when it is OK to switch to sequential execution +-- multi-shard commands are not allowed with start_metadata_sync BEGIN; -- sync at the start of the tx SELECT start_metadata_sync_to_node('localhost', :worker_1_port); @@ -271,9 +260,7 @@ 
BEGIN;
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 ROLLBACK;
--- multi-shard commands are allowed with start_metadata_sync
--- as long as the start_metadata_sync_to_node executed
--- when it is OK to switch to sequential execution
+-- multi-shard commands are not allowed with start_metadata_sync
 BEGIN;
 	-- sync at the start of the tx
 	SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
diff --git a/src/test/regress/sql/turn_mx_on.sql b/src/test/regress/sql/turn_mx_on.sql
index dfe31857b..67b548043 100644
--- a/src/test/regress/sql/turn_mx_on.sql
+++ b/src/test/regress/sql/turn_mx_on.sql
@@ -1,5 +1,6 @@
 ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON;
 SELECT pg_reload_conf();
+SELECT pg_sleep(0.1);
 SET client_min_messages TO ERROR;
 SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary';

From 768643644b69fd5888ea2cd2449ad20ba65a80d4 Mon Sep 17 00:00:00 2001
From: Hanefi Onaldi
Date: Tue, 1 Feb 2022 02:57:01 +0300
Subject: [PATCH 02/28] Add changelog entries for 10.1.4

---
 CHANGELOG.md | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index edf91cfa7..210507e34 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,18 @@
+### citus v10.1.4 (February 1, 2022) ###
+
+* Adds missing version checks for columnar tables
+
+* Fixes a bug that could break `DROP SCHEMA/EXTENSION` commands when there is
+  a columnar table
+
+* Fixes a build error that happens when `lz4` is not installed
+
+* Fixes a missing `FROM` clause entry error
+
+* Reinstates optimisation for uniform shard interval ranges
+
+* Fixes a bug that causes commands to fail when `application_name` is set
+
 ### citus v10.2.3 (November 29, 2021) ###

 * Adds `fix_partition_shard_index_names` udf to fix currently broken

From beafde5ff53191158466800a73ed5f1f16c1568d Mon Sep 17 00:00:00 2001
From: Hanefi Onaldi
Date: Tue, 1 Feb 2022 03:07:47 +0300
Subject: [PATCH 03/28] Add changelog entries for 10.2.4

---
 CHANGELOG.md | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 210507e34..af747f5ea 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,17 @@
+### citus v10.2.4 (February 1, 2022) ###
+
+* Adds support for operator class parameters in indexes
+
+* Fixes a bug with distributed functions that have `OUT` parameters or
+  return `TABLE`
+
+* Fixes a build error that happens when `lz4` is not installed
+
+* Improves self-deadlock prevention for `CREATE INDEX` &
+  `REINDEX CONCURRENTLY` commands for builds using PG14 or higher
+
+* Fixes a bug that causes commands to fail when `application_name` is set
+
 ### citus v10.1.4 (February 1, 2022) ###

 * Adds missing version checks for columnar tables

From f712dfc55882c11382e4d62da317ce9359ac11f2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=96nder=20Kalac=C4=B1?=
Date: Tue, 1 Feb 2022 13:39:52 +0100
Subject: [PATCH 04/28] Add test coverage (#5672)

For extension-owned tables with sequences

---
 src/test/regress/expected/multi_mx_ddl.out | 123 ++++++++++++++++++++-
 src/test/regress/sql/multi_mx_ddl.sql      |  45 +++++++-
 2 files changed, 166 insertions(+), 2 deletions(-)

diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out
index f69e4b49a..14318c3d8 100644
--- a/src/test/regress/expected/multi_mx_ddl.out
+++ b/src/test/regress/expected/multi_mx_ddl.out
@@ -303,5 +303,126 @@ SELECT * FROM seg_test;
 (1 row)

 \c - - - :master_port
+CREATE SCHEMA ext_owned_tables;
+SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE SCHEMA") + (localhost,57638,t,"CREATE SCHEMA") +(2 rows) + +SET search_path TO ext_owned_tables; +CREATE sequence my_seq_ext_1; +SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_1;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE SEQUENCE") + (localhost,57638,t,"CREATE SEQUENCE") +(2 rows) + +CREATE sequence my_seq_ext_2; +SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_2;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE SEQUENCE") + (localhost,57638,t,"CREATE SEQUENCE") +(2 rows) + +-- test distributed tables owned by extension +CREATE TABLE seg_test (x int, y bigserial, z int default nextval('my_seq_ext_1')); +SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.seg_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_1'))$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE TABLE") + (localhost,57638,t,"CREATE TABLE") +(2 rows) + +INSERT INTO seg_test VALUES (42); +CREATE TABLE tcn_test (x int, y bigserial, z int default nextval('my_seq_ext_2')); +SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.tcn_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_2'));$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE TABLE") + (localhost,57638,t,"CREATE TABLE") +(2 rows) + +INSERT INTO tcn_test VALUES (42); +-- pretend this table belongs to an extension +ALTER EXTENSION seg ADD TABLE ext_owned_tables.seg_test; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. +ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. +SELECT run_command_on_workers($$ALTER EXTENSION seg ADD TABLE ext_owned_tables.seg_test;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"ALTER EXTENSION") + (localhost,57638,t,"ALTER EXTENSION") +(2 rows) + +SELECT run_command_on_workers($$ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"ALTER EXTENSION") + (localhost,57638,t,"ALTER EXTENSION") +(2 rows) + +CREATE EXTENSION tcn; +ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. +ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. 
+SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"ALTER EXTENSION") + (localhost,57638,t,"ALTER EXTENSION") +(2 rows) + +SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"ALTER EXTENSION") + (localhost,57638,t,"ALTER EXTENSION") +(2 rows) + +SELECT create_reference_table('seg_test'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ext_owned_tables.seg_test$$) + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('tcn_test', 'x'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$ext_owned_tables.tcn_test$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- test metadata re-sync in the presence of an extension-owned table +-- and serial/sequences +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + -- also drops table on both worker and master -DROP EXTENSION seg CASCADE; +SET client_min_messages TO ERROR; +DROP SCHEMA ext_owned_tables CASCADE; diff --git a/src/test/regress/sql/multi_mx_ddl.sql b/src/test/regress/sql/multi_mx_ddl.sql index 0e3c0ff0e..5d1622d35 100644 --- a/src/test/regress/sql/multi_mx_ddl.sql +++ b/src/test/regress/sql/multi_mx_ddl.sql @@ -172,5 +172,48 @@ SELECT * FROM seg_test; \c - - - :master_port +CREATE SCHEMA ext_owned_tables; +SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$); + +SET search_path TO ext_owned_tables; + +CREATE sequence my_seq_ext_1; +SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_1;$$); +CREATE sequence my_seq_ext_2; +SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_2;$$); + +-- test distributed tables owned by extension +CREATE TABLE seg_test (x int, y bigserial, z int default nextval('my_seq_ext_1')); +SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.seg_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_1'))$$); + +INSERT INTO seg_test VALUES (42); + +CREATE TABLE tcn_test (x int, y bigserial, z int default nextval('my_seq_ext_2')); +SELECT run_command_on_workers($$CREATE TABLE ext_owned_tables.tcn_test (x int, y bigserial, z int default nextval('ext_owned_tables.my_seq_ext_2'));$$); + +INSERT INTO tcn_test VALUES (42); + +-- pretend this table belongs to an extension +ALTER EXTENSION seg ADD TABLE ext_owned_tables.seg_test; +ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1; +SELECT run_command_on_workers($$ALTER EXTENSION seg ADD TABLE 
ext_owned_tables.seg_test;$$); +SELECT run_command_on_workers($$ALTER EXTENSION seg ADD SEQUENCE ext_owned_tables.my_seq_ext_1;$$); + + +CREATE EXTENSION tcn; +ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test; +ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2; +SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD TABLE ext_owned_tables.tcn_test;$$); +SELECT run_command_on_workers($$ALTER EXTENSION tcn ADD SEQUENCE ext_owned_tables.my_seq_ext_2;$$); + +SELECT create_reference_table('seg_test'); +SELECT create_distributed_table('tcn_test', 'x'); + +-- test metadata re-sync in the presence of an extension-owned table +-- and serial/sequences +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); +SELECT start_metadata_sync_to_node('localhost', :worker_1_port); + -- also drops table on both worker and master -DROP EXTENSION seg CASCADE; +SET client_min_messages TO ERROR; +DROP SCHEMA ext_owned_tables CASCADE; From 63c68967164bbe00af19c567108cb0824e59f427 Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Thu, 27 Jan 2022 18:39:52 +0100 Subject: [PATCH 05/28] Enable function call pushdown from workers --- .../planner/function_call_delegation.c | 11 --- .../multi_mx_function_call_delegation.out | 86 +++++++++++++++++-- .../multi_mx_function_call_delegation_0.out | 86 +++++++++++++++++-- .../sql/multi_mx_function_call_delegation.sql | 39 ++++++++- 4 files changed, 196 insertions(+), 26 deletions(-) diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 2ae9fdde3..3ab0c71c8 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -303,17 +303,6 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext) return NULL; } - if (localGroupId != COORDINATOR_GROUP_ID) - { - /* - * We are calling a distributed function on a worker node. We currently - * only delegate from the coordinator. - * - * TODO: remove this restriction. - */ - return NULL; - } - /* * Cannot delegate functions for INSERT ... SELECT func(), since they require * coordinated transactions. diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index e77e0c3b5..d48f001bf 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -74,6 +74,11 @@ LANGUAGE plpgsql AS $$ BEGIN y := x + y * 2; END;$$; +CREATE FUNCTION mx_call_func_bigint_force(x bigint, INOUT y bigint) +LANGUAGE plpgsql AS $$ +BEGIN + PERFORM multi_mx_function_call_delegation.mx_call_func_bigint(x, y); +END;$$; -- create another function which verifies: -- 1. we work fine with multiple return columns -- 2. 
we work fine in combination with custom types @@ -193,12 +198,6 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass (1 row) -select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1); - colocate_proc_with_table ---------------------------------------------------------------------- - -(1 row) - select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1); colocate_proc_with_table --------------------------------------------------------------------- @@ -211,6 +210,26 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); (1 row) +select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x', + colocate_with := 'mx_call_dist_table_bigint'); +DEBUG: switching to sequential query execution mode +DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- set up a force_delegation function +select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', 'x', + colocate_with := 'mx_call_dist_table_2', + force_delegation := true); +DEBUG: switching to sequential query execution mode +DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + select mx_call_func(2, 0); DEBUG: pushing down the function call mx_call_func @@ -748,6 +767,16 @@ HINT: Connect to the coordinator and run it again. 
-- show that functions can be delegated from worker nodes SET client_min_messages TO DEBUG1; SELECT mx_call_func(2, 0); +DEBUG: pushing down the function call + mx_call_func +--------------------------------------------------------------------- + 28 +(1 row) + +-- not delegated in a transaction block +BEGIN; +SELECT mx_call_func(2, 0); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line XX at assignment @@ -759,9 +788,52 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment 28 (1 row) +END; +-- not delegated in a DO block +DO $$ +BEGIN + PERFORM mx_call_func(2, 0); +END; +$$ LANGUAGE plpgsql; +DEBUG: not pushing down function calls in a multi-statement transaction +CONTEXT: SQL statement "SELECT mx_call_func(2, 0)" +PL/pgSQL function inline_code_block line XX at PERFORM +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" +PL/pgSQL function mx_call_func(integer,integer) line XX at assignment +SQL statement "SELECT mx_call_func(2, 0)" +PL/pgSQL function inline_code_block line XX at PERFORM +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer +CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" +PL/pgSQL function mx_call_func(integer,integer) line XX at assignment +SQL statement "SELECT mx_call_func(2, 0)" +PL/pgSQL function inline_code_block line XX at PERFORM +-- forced calls are delegated in a transaction block +BEGIN; +SELECT mx_call_func_bigint_force(4, 2); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call + mx_call_func_bigint_force +--------------------------------------------------------------------- + 2 +(1 row) + +END; +-- forced calls are delegated in a DO block +DO $$ +BEGIN + PERFORM * FROM mx_call_func_bigint_force(4, 2); +END; +$$ LANGUAGE plpgsql; +DEBUG: pushing down function call in a multi-statement transaction +CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" +PL/pgSQL function inline_code_block line XX at PERFORM +DEBUG: pushing down the function call +CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" +PL/pgSQL function inline_code_block line XX at PERFORM \c - - - :master_port SET search_path TO multi_mx_function_call_delegation, 
public; RESET client_min_messages; \set VERBOSITY terse DROP SCHEMA multi_mx_function_call_delegation CASCADE; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 16 other objects diff --git a/src/test/regress/expected/multi_mx_function_call_delegation_0.out b/src/test/regress/expected/multi_mx_function_call_delegation_0.out index 657183bc2..06a7b320d 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation_0.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation_0.out @@ -74,6 +74,11 @@ LANGUAGE plpgsql AS $$ BEGIN y := x + y * 2; END;$$; +CREATE FUNCTION mx_call_func_bigint_force(x bigint, INOUT y bigint) +LANGUAGE plpgsql AS $$ +BEGIN + PERFORM multi_mx_function_call_delegation.mx_call_func_bigint(x, y); +END;$$; -- create another function which verifies: -- 1. we work fine with multiple return columns -- 2. we work fine in combination with custom types @@ -193,12 +198,6 @@ select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass (1 row) -select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1); - colocate_proc_with_table ---------------------------------------------------------------------- - -(1 row) - select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1); colocate_proc_with_table --------------------------------------------------------------------- @@ -211,6 +210,26 @@ select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); (1 row) +select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x', + colocate_with := 'mx_call_dist_table_bigint'); +DEBUG: switching to sequential query execution mode +DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- set up a force_delegation function +select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', 'x', + colocate_with := 'mx_call_dist_table_2', + force_delegation := true); +DEBUG: switching to sequential query execution mode +DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + select mx_call_func(2, 0); DEBUG: pushing down the function call mx_call_func @@ -748,6 +767,16 @@ HINT: Connect to the coordinator and run it again. 
-- show that functions can be delegated from worker nodes SET client_min_messages TO DEBUG1; SELECT mx_call_func(2, 0); +DEBUG: pushing down the function call + mx_call_func +--------------------------------------------------------------------- + 28 +(1 row) + +-- not delegated in a transaction block +BEGIN; +SELECT mx_call_func(2, 0); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" PL/pgSQL function mx_call_func(integer,integer) line XX at assignment @@ -759,9 +788,52 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment 28 (1 row) +END; +-- not delegated in a DO block +DO $$ +BEGIN + PERFORM mx_call_func(2, 0); +END; +$$ LANGUAGE plpgsql; +DEBUG: not pushing down function calls in a multi-statement transaction +CONTEXT: SQL statement "SELECT mx_call_func(2, 0)" +PL/pgSQL function inline_code_block line XX at PERFORM +DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) +CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" +PL/pgSQL function mx_call_func(integer,integer) line XX at assignment +SQL statement "SELECT mx_call_func(2, 0)" +PL/pgSQL function inline_code_block line XX at PERFORM +DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (2 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) +CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" +PL/pgSQL function mx_call_func(integer,integer) line XX at assignment +SQL statement "SELECT mx_call_func(2, 0)" +PL/pgSQL function inline_code_block line XX at PERFORM +-- forced calls are delegated in a transaction block +BEGIN; +SELECT mx_call_func_bigint_force(4, 2); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call + mx_call_func_bigint_force +--------------------------------------------------------------------- + 2 +(1 row) + +END; +-- forced calls are delegated in a DO block +DO $$ +BEGIN + PERFORM * FROM mx_call_func_bigint_force(4, 2); +END; +$$ LANGUAGE plpgsql; +DEBUG: pushing down function call in a multi-statement transaction +CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" +PL/pgSQL function inline_code_block line XX at PERFORM +DEBUG: pushing down the function call +CONTEXT: SQL statement "SELECT * FROM mx_call_func_bigint_force(4, 2)" +PL/pgSQL function inline_code_block line XX at PERFORM \c - - - :master_port SET search_path TO multi_mx_function_call_delegation, public; RESET 
client_min_messages; \set VERBOSITY terse DROP SCHEMA multi_mx_function_call_delegation CASCADE; -NOTICE: drop cascades to 15 other objects +NOTICE: drop cascades to 16 other objects diff --git a/src/test/regress/sql/multi_mx_function_call_delegation.sql b/src/test/regress/sql/multi_mx_function_call_delegation.sql index 4dfe91322..206969456 100644 --- a/src/test/regress/sql/multi_mx_function_call_delegation.sql +++ b/src/test/regress/sql/multi_mx_function_call_delegation.sql @@ -57,6 +57,12 @@ BEGIN y := x + y * 2; END;$$; +CREATE FUNCTION mx_call_func_bigint_force(x bigint, INOUT y bigint) +LANGUAGE plpgsql AS $$ +BEGIN + PERFORM multi_mx_function_call_delegation.mx_call_func_bigint(x, y); +END;$$; + -- create another function which verifies: -- 1. we work fine with multiple return columns -- 2. we work fine in combination with custom types @@ -104,10 +110,17 @@ select mx_call_func_custom_types('S', 'A'); -- Mark them as colocated with a table. Now we should route them to workers. select colocate_proc_with_table('mx_call_func', 'mx_call_dist_table_1'::regclass, 1); -select colocate_proc_with_table('mx_call_func_bigint', 'mx_call_dist_table_bigint'::regclass, 1); select colocate_proc_with_table('mx_call_func_custom_types', 'mx_call_dist_table_enum'::regclass, 1); select colocate_proc_with_table('squares', 'mx_call_dist_table_2'::regclass, 0); +select create_distributed_function('mx_call_func_bigint(bigint,bigint)', 'x', + colocate_with := 'mx_call_dist_table_bigint'); + +-- set up a force_delegation function +select create_distributed_function('mx_call_func_bigint_force(bigint,bigint)', 'x', + colocate_with := 'mx_call_dist_table_2', + force_delegation := true); + select mx_call_func(2, 0); select mx_call_func_bigint(4, 2); select mx_call_func_custom_types('S', 'A'); @@ -294,6 +307,30 @@ select create_distributed_function('mx_call_func(int,int)'); SET client_min_messages TO DEBUG1; SELECT mx_call_func(2, 0); +-- not delegated in a transaction block +BEGIN; +SELECT mx_call_func(2, 0); +END; + +-- not delegated in a DO block +DO $$ +BEGIN + PERFORM mx_call_func(2, 0); +END; +$$ LANGUAGE plpgsql; + +-- forced calls are delegated in a transaction block +BEGIN; +SELECT mx_call_func_bigint_force(4, 2); +END; + +-- forced calls are delegated in a DO block +DO $$ +BEGIN + PERFORM * FROM mx_call_func_bigint_force(4, 2); +END; +$$ LANGUAGE plpgsql; + \c - - - :master_port SET search_path TO multi_mx_function_call_delegation, public; From 34d91009edc08870e11ee70bf48490a05b6e03c9 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Tue, 1 Feb 2022 15:37:10 +0100 Subject: [PATCH 06/28] Update outdated comment As of the current HEAD, we support sequences as first class objects --- .../distributed/metadata/pg_get_object_address_12_13_14.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index f2d66fb59..c2a8e29e3 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -402,8 +402,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, bool skipAclCheck = false; Oid idToCheck = InvalidOid; - /* Since we don't handle sequences like object, add it separately */ - if (!(SupportedDependencyByCitus(addr) || type == OBJECT_SEQUENCE)) + if (!SupportedDependencyByCitus(addr)) { ereport(ERROR, (errmsg("Object type %d can not be distributed by 
Citus", type)));
 }

From 650243927ca2c435563ab8425e86567bc6d44081 Mon Sep 17 00:00:00 2001
From: Onder Kalaci
Date: Tue, 1 Feb 2022 15:39:06 +0100
Subject: [PATCH 07/28] Relax some transactional limitations on activate node

We already enforce EnsureSequentialModeMetadataOperations(), and given
that node activation runs within a single transaction, we should be fine

---
 .../distributed/metadata/node_metadata.c      |  1 -
 .../multi_replicate_reference_table.out       | 18 +++++++++++++++---
 .../expected/start_stop_metadata_sync.out     | 18 ++++++++++++++----
 3 files changed, 29 insertions(+), 8 deletions(-)

diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c
index b5423a8a2..d040514bc 100644
--- a/src/backend/distributed/metadata/node_metadata.c
+++ b/src/backend/distributed/metadata/node_metadata.c
@@ -787,7 +787,6 @@ SyncDistributedObjectsToNode(WorkerNode *workerNode)
 		return;
 	}

-	EnsureNoModificationsHaveBeenDone();
 	EnsureSequentialModeMetadataOperations();

 	Assert(ShouldPropagate());
diff --git a/src/test/regress/expected/multi_replicate_reference_table.out b/src/test/regress/expected/multi_replicate_reference_table.out
index e77ce2df1..aa5d10149 100644
--- a/src/test/regress/expected/multi_replicate_reference_table.out
+++ b/src/test/regress/expected/multi_replicate_reference_table.out
@@ -464,7 +464,11 @@ SELECT create_reference_table('replicate_reference_table_insert');
 BEGIN;
 INSERT INTO replicate_reference_table_insert VALUES(1);
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-ERROR: cannot open new connections after the first modification command within a transaction
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
 ROLLBACK;
 DROP TABLE replicate_reference_table_insert;
 -- test COPY then adding a new node in a transaction
@@ -479,7 +483,11 @@ SET citus.enable_local_execution = 'off';
 BEGIN;
 COPY replicate_reference_table_copy FROM STDIN;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-ERROR: cannot open new connections after the first modification command within a transaction
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
 ROLLBACK;
 RESET citus.enable_local_execution;
 DROP TABLE replicate_reference_table_copy;
@@ -494,7 +502,11 @@ SELECT create_reference_table('replicate_reference_table_ddl');
 BEGIN;
 ALTER TABLE replicate_reference_table_ddl ADD column2 int;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-ERROR: cannot open new connections after the first modification command within a transaction
+ ?column?
+--------------------------------------------------------------------- + 1 +(1 row) + ROLLBACK; DROP TABLE replicate_reference_table_ddl; -- test DROP table after adding new node in a transaction diff --git a/src/test/regress/expected/start_stop_metadata_sync.out b/src/test/regress/expected/start_stop_metadata_sync.out index 3cbbf1572..1f82c60cb 100644 --- a/src/test/regress/expected/start_stop_metadata_sync.out +++ b/src/test/regress/expected/start_stop_metadata_sync.out @@ -156,7 +156,7 @@ SELECT * FROM test_matview; (1 row) SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text; - logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted + logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted --------------------------------------------------------------------- events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 1390012 | s | f @@ -468,7 +468,9 @@ BEGIN; (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -ERROR: cannot open new connections after the first modification command within a transaction +ERROR: cannot execute metadata syncing operation because there was a parallel operation on a distributed table in the transaction +DETAIL: When modifying metadata, Citus needs to perform all operations over a single connection per node to ensure consistency. +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" ROLLBACK; -- this is safe because start_metadata_sync_to_node already switches to -- sequential execution @@ -539,7 +541,11 @@ BEGIN; -- sync at the end of the tx SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -ERROR: cannot open new connections after the first modification command within a transaction + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + ROLLBACK; -- multi-shard commands are not allowed with start_metadata_sync BEGIN; @@ -576,7 +582,11 @@ BEGIN; -- sync at the end of the tx SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -ERROR: cannot open new connections after the first modification command within a transaction + start_metadata_sync_to_node +--------------------------------------------------------------------- + +(1 row) + ROLLBACK; -- cleanup \c - - - :master_port From b072b9235e3c312e4a51cbbcf051e29451c30102 Mon Sep 17 00:00:00 2001 From: jeff-davis Date: Wed, 2 Feb 2022 13:22:11 -0800 Subject: [PATCH 08/28] Columnar: fix checksums, broken in a4067913. (#5669) Checksums must be set directly before writing the page. log_newpage() sets the page LSN, and therefore invalidates the checksum. 
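A minimal sketch of how the breakage could surface, assuming a cluster
initialized with checksums enabled (initdb --data-checksums); the table
name is illustrative only:

    CREATE TABLE columnar_checksum_check (a int) USING columnar;
    INSERT INTO columnar_checksum_check VALUES (1);
    -- with the old ordering, the pages written above carry a checksum that
    -- was computed before log_newpage() stamped the LSN, so a later read
    -- from disk could fail verification (e.g. "page verification failed")
    SELECT count(*) FROM columnar_checksum_check;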
--- src/backend/columnar/columnar_storage.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/columnar/columnar_storage.c b/src/backend/columnar/columnar_storage.c index 58eb87b7e..71fc75ccb 100644 --- a/src/backend/columnar/columnar_storage.c +++ b/src/backend/columnar/columnar_storage.c @@ -186,17 +186,17 @@ ColumnarStorageInit(SMgrRelation srel, uint64 storageId) (char *) &metapage, sizeof(ColumnarMetapage)); phdr->pd_lower += sizeof(ColumnarMetapage); - PageSetChecksumInplace(page, COLUMNAR_METAPAGE_BLOCKNO); log_newpage(&srel->smgr_rnode.node, MAIN_FORKNUM, COLUMNAR_METAPAGE_BLOCKNO, page, true); + PageSetChecksumInplace(page, COLUMNAR_METAPAGE_BLOCKNO); smgrextend(srel, MAIN_FORKNUM, COLUMNAR_METAPAGE_BLOCKNO, page, true); /* write empty page */ PageInit(page, BLCKSZ, 0); - PageSetChecksumInplace(page, COLUMNAR_EMPTY_BLOCKNO); log_newpage(&srel->smgr_rnode.node, MAIN_FORKNUM, COLUMNAR_EMPTY_BLOCKNO, page, true); + PageSetChecksumInplace(page, COLUMNAR_EMPTY_BLOCKNO); smgrextend(srel, MAIN_FORKNUM, COLUMNAR_EMPTY_BLOCKNO, page, true); /* From f31bce5b486f39eed511e91bc4a8a5cc75bb6b32 Mon Sep 17 00:00:00 2001 From: Teja Mupparti Date: Thu, 27 Jan 2022 15:29:51 -0800 Subject: [PATCH 09/28] Fixes the issue seen in https://github.com/citusdata/citus-enterprise/issues/745 With this commit, rebalancer backends are identified by application_name = citus_rebalancer and the regular internal backends are identified by application_name = citus_internal --- src/backend/distributed/commands/call.c | 2 +- .../distributed/commands/utility_hook.c | 2 +- .../connection/connection_management.c | 15 +- .../distributed/executor/multi_executor.c | 2 +- .../distributed/metadata/metadata_sync.c | 3 +- .../distributed/operations/shard_cleaner.c | 2 +- .../distributed/operations/shard_rebalancer.c | 18 ++- .../planner/function_call_delegation.c | 2 +- .../transaction/citus_dist_stat_activity.c | 2 +- .../transaction/transaction_management.c | 2 +- .../worker/worker_shard_visibility.c | 5 +- .../distributed/connection_management.h | 8 +- src/include/distributed/shard_rebalancer.h | 2 +- .../expected/metadata_sync_helpers.out | 138 +++++++++--------- .../regress/expected/shard_rebalancer.out | 29 ++++ .../regress/sql/metadata_sync_helpers.sql | 138 +++++++++--------- src/test/regress/sql/shard_rebalancer.sql | 18 +++ 17 files changed, 229 insertions(+), 159 deletions(-) diff --git a/src/backend/distributed/commands/call.c b/src/backend/distributed/commands/call.c index af319f0ce..91260a07e 100644 --- a/src/backend/distributed/commands/call.c +++ b/src/backend/distributed/commands/call.c @@ -68,7 +68,7 @@ CallDistributedProcedureRemotely(CallStmt *callStmt, DestReceiver *dest) return false; } - if (IsCitusInitiatedRemoteBackend()) + if (IsCitusInternalBackend()) { /* * We are in a citus-initiated backend handling a CALL to a distributed diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index b7f52e871..cf9012dd5 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -897,7 +897,7 @@ ShouldCheckUndistributeCitusLocalTables(void) return false; } - if (IsCitusInitiatedRemoteBackend()) + if (IsCitusInternalBackend() || IsRebalancerInternalBackend()) { /* connection from the coordinator operating on a shard */ return false; diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index 
9ca2cbb96..89a863109 100644
--- a/src/backend/distributed/connection/connection_management.c
+++ b/src/backend/distributed/connection/connection_management.c
@@ -1429,7 +1429,7 @@ ShouldShutdownConnection(MultiConnection *connection, const int cachedConnection
 	 * escalating the number of cached connections. We can recognize such backends
 	 * from their application name.
 	 */
-	return IsCitusInitiatedRemoteBackend() ||
+	return (IsCitusInternalBackend() || IsRebalancerInternalBackend()) ||
 		   connection->initilizationState != POOL_STATE_INITIALIZED ||
 		   cachedConnectionCount >= MaxCachedConnectionsPerWorker ||
 		   connection->forceCloseAtTransactionEnd ||
@@ -1441,12 +1441,23 @@
 }


+/*
+ * IsRebalancerInternalBackend returns true if we are in a backend that the citus
+ * rebalancer initiated.
+ */
+bool
+IsRebalancerInternalBackend(void)
+{
+	return application_name && strcmp(application_name, CITUS_REBALANCER_NAME) == 0;
+}
+
+
 /*
  * IsCitusInternalBackend returns true if we are in a backend that citus
  * initiated via remote connection.
  */
 bool
-IsCitusInitiatedRemoteBackend(void)
+IsCitusInternalBackend(void)
 {
 	return application_name && strcmp(application_name, CITUS_APPLICATION_NAME) == 0;
 }
diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c
index 7acc2b510..a101d2968 100644
--- a/src/backend/distributed/executor/multi_executor.c
+++ b/src/backend/distributed/executor/multi_executor.c
@@ -779,7 +779,7 @@ InTaskExecution(void)
 	 * is considered a task execution, but an exception is when we
 	 * are in a delegated function/procedure call.
 	 */
-	return IsCitusInitiatedRemoteBackend() &&
+	return IsCitusInternalBackend() &&
 		   !InTopLevelDelegatedFunctionCall &&
 		   !InDelegatedProcedureCall;
 }
diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c
index 46f7cbc3a..988dedbc7 100644
--- a/src/backend/distributed/metadata/metadata_sync.c
+++ b/src/backend/distributed/metadata/metadata_sync.c
@@ -2636,7 +2636,8 @@ EnsureCoordinatorInitiatedOperation(void)
 	 * check. The other two checks are to ensure that the operation is initiated
 	 * by the coordinator.
 	 */
-	if (!IsCitusInitiatedRemoteBackend() || !MyBackendIsInDisributedTransaction() ||
+	if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend()) ||
+		!MyBackendIsInDisributedTransaction() ||
 		GetLocalGroupId() == COORDINATOR_GROUP_ID)
 	{
 		ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c
index d07403355..cb9f8ae20 100644
--- a/src/backend/distributed/operations/shard_cleaner.c
+++ b/src/backend/distributed/operations/shard_cleaner.c
@@ -96,7 +96,7 @@ isolation_cleanup_orphaned_shards(PG_FUNCTION_ARGS)
 void
 DropOrphanedShardsInSeparateTransaction(void)
 {
-	ExecuteCriticalCommandInSeparateTransaction("CALL citus_cleanup_orphaned_shards()");
+	ExecuteRebalancerCommandInSeparateTransaction("CALL citus_cleanup_orphaned_shards()");
 }
diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c
index 166423cf0..d35427e6b 100644
--- a/src/backend/distributed/operations/shard_rebalancer.c
+++ b/src/backend/distributed/operations/shard_rebalancer.c
@@ -919,10 +919,10 @@ citus_drain_node(PG_FUNCTION_ARGS)
 	 * This is done in a separate session.
This way it's not undone if the * draining fails midway through. */ - ExecuteCriticalCommandInSeparateTransaction(psprintf( - "SELECT master_set_node_property(%s, %i, 'shouldhaveshards', false)", - quote_literal_cstr(nodeName), - nodePort)); + ExecuteRebalancerCommandInSeparateTransaction(psprintf( + "SELECT master_set_node_property(%s, %i, 'shouldhaveshards', false)", + quote_literal_cstr(nodeName), + nodePort)); RebalanceTableShards(&options, shardTransferModeOid); @@ -1696,7 +1696,7 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent, * In case of failure, we throw an error such that rebalance_table_shards * fails early. */ - ExecuteCriticalCommandInSeparateTransaction(placementUpdateCommand->data); + ExecuteRebalancerCommandInSeparateTransaction(placementUpdateCommand->data); UpdateColocatedShardPlacementProgress(shardId, sourceNode->workerName, @@ -1711,12 +1711,18 @@ UpdateShardPlacement(PlacementUpdateEvent *placementUpdateEvent, * don't want to rollback when the current transaction is rolled back. */ void -ExecuteCriticalCommandInSeparateTransaction(char *command) +ExecuteRebalancerCommandInSeparateTransaction(char *command) { int connectionFlag = FORCE_NEW_CONNECTION; MultiConnection *connection = GetNodeConnection(connectionFlag, LocalHostName, PostPortNumber); + StringInfo setApplicationName = makeStringInfo(); + appendStringInfo(setApplicationName, "SET application_name TO %s", + CITUS_REBALANCER_NAME); + + ExecuteCriticalRemoteCommand(connection, setApplicationName->data); ExecuteCriticalRemoteCommand(connection, command); + CloseConnection(connection); } diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 3ab0c71c8..cef7cdf25 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -260,7 +260,7 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext) ereport(DEBUG4, (errmsg("function is distributed"))); } - if (IsCitusInitiatedRemoteBackend()) + if (IsCitusInternalBackend()) { bool isFunctionForceDelegated = procedure->forceDelegation; diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c index d1aa9a034..d85959925 100644 --- a/src/backend/distributed/transaction/citus_dist_stat_activity.c +++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c @@ -188,7 +188,7 @@ FROM \ get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \ ON pg_stat_activity.pid = dist_txs.process_id \ WHERE \ - pg_stat_activity.application_name = 'citus' \ + pg_stat_activity.application_name = 'citus_internal' \ AND \ pg_stat_activity.query NOT ILIKE '%stat_activity%';" diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index c87c60d7b..4fe97e421 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -824,7 +824,7 @@ EnsurePrepareTransactionIsAllowed(void) return; } - if (IsCitusInitiatedRemoteBackend()) + if (IsCitusInternalBackend()) { /* * If this is a Citus-initiated backend. 
diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c index d7e9b87cb..ca05e8cee 100644 --- a/src/backend/distributed/worker/worker_shard_visibility.c +++ b/src/backend/distributed/worker/worker_shard_visibility.c @@ -150,7 +150,8 @@ ErrorIfRelationIsAKnownShard(Oid relationId) void ErrorIfIllegallyChangingKnownShard(Oid relationId) { - if (LocalExecutorLevel > 0 || IsCitusInitiatedRemoteBackend() || + if (LocalExecutorLevel > 0 || + (IsCitusInternalBackend() || IsRebalancerInternalBackend()) || EnableManualChangesToShards) { return; @@ -330,7 +331,7 @@ ResetHideShardsDecision(void) static bool ShouldHideShardsInternal(void) { - if (IsCitusInitiatedRemoteBackend()) + if (IsCitusInternalBackend() || IsRebalancerInternalBackend()) { /* we never hide shards from Citus */ return false; diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index aca9cee0f..721617474 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -29,7 +29,10 @@ #define ERROR_BUFFER_SIZE 256 /* application name used for internal connections in Citus */ -#define CITUS_APPLICATION_NAME "citus" +#define CITUS_APPLICATION_NAME "citus_internal" + +/* application name used for internal connections in rebalancer */ +#define CITUS_REBALANCER_NAME "citus_rebalancer" /* forward declare, to avoid forcing large headers on everyone */ struct pg_conn; /* target of the PGconn typedef */ @@ -277,7 +280,8 @@ extern void FinishConnectionListEstablishment(List *multiConnectionList); extern void FinishConnectionEstablishment(MultiConnection *connection); extern void ClaimConnectionExclusively(MultiConnection *connection); extern void UnclaimConnection(MultiConnection *connection); -extern bool IsCitusInitiatedRemoteBackend(void); +extern bool IsCitusInternalBackend(void); +extern bool IsRebalancerInternalBackend(void); extern void MarkConnectionConnected(MultiConnection *connection); /* time utilities */ diff --git a/src/include/distributed/shard_rebalancer.h b/src/include/distributed/shard_rebalancer.h index de0684d68..3e6d7a8b7 100644 --- a/src/include/distributed/shard_rebalancer.h +++ b/src/include/distributed/shard_rebalancer.h @@ -190,7 +190,7 @@ extern List * RebalancePlacementUpdates(List *workerNodeList, List *shardPlaceme RebalancePlanFunctions *rebalancePlanFunctions); extern List * ReplicationPlacementUpdates(List *workerNodeList, List *shardPlacementList, int shardReplicationFactor); -extern void ExecuteCriticalCommandInSeparateTransaction(char *command); +extern void ExecuteRebalancerCommandInSeparateTransaction(char *command); #endif /* SHARD_REBALANCER_H */ diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 752fbe925..15de77e4d 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -36,7 +36,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; @@ -73,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT 
citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: must be owner of table test ROLLBACK; @@ -85,7 +85,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); ERROR: must be owner of table test ROLLBACK; @@ -99,7 +99,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -121,7 +121,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; @@ -133,7 +133,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ERROR: column "non_existing_col" of relation "test_2" does not exist ROLLBACK; @@ -145,7 +145,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ERROR: relation cannot be NULL ROLLBACK; @@ -157,7 +157,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ERROR: Metadata syncing is only allowed for valid colocation id values. 
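+-- setting application_name alone is not enough for these internal UDFs;
+-- EnsureCoordinatorInitiatedOperation also requires the backend to be in a
+-- distributed transaction, hence the assign_distributed_transaction_id calls
+-- at the start of each block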
ROLLBACK; @@ -169,7 +169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; @@ -181,7 +181,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata @@ -200,7 +200,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata @@ -219,7 +219,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ERROR: Distribution column cannot be NULL for relation "test_2" @@ -252,7 +252,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -268,7 +268,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); ERROR: could not find valid entry for shard xxxxx @@ -298,7 +298,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: role "non_existing_user" does not exist ROLLBACK; @@ -329,7 +329,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ERROR: Reference or local tables cannot have distribution columns ROLLBACK; @@ -341,7 +341,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ERROR: Metadata syncing is only allowed for known replication models. ROLLBACK; @@ -353,7 +353,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ERROR: Local or references tables can only have 's' or 't' as the replication model. 
ROLLBACK; @@ -368,7 +368,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -387,7 +387,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -402,7 +402,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -417,7 +417,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -445,7 +445,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); citus_internal_update_relation_colocation --------------------------------------------------------------------- @@ -461,7 +461,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -476,7 +476,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -491,7 +491,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) @@ -506,7 +506,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) @@ -521,7 +521,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 
't'::"char", '-1610612737'::text, '-2147483648'::text)) @@ -536,7 +536,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), @@ -554,7 +554,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -569,7 +569,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) @@ -583,7 +583,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) @@ -598,7 +598,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -614,7 +614,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) @@ -635,7 +635,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse CREATE TABLE publication_test_table(id int); CREATE PUBLICATION publication_test FOR TABLE publication_test_table; @@ -653,7 +653,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse CREATE FUNCTION distribution_test_function(int) RETURNS int AS $$ SELECT $1 $$ @@ -674,7 +674,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse UPDATE pg_dist_partition SET partmethod = 'X'; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) @@ -693,7 +693,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) @@ -720,7 +720,7 @@ BEGIN 
TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text), @@ -751,7 +751,7 @@ BEGIN; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; @@ -763,7 +763,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_3'::regclass, 1420009::bigint, 't'::"char", '21'::text, '30'::text), @@ -790,7 +790,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) @@ -805,7 +805,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), @@ -821,7 +821,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) @@ -842,7 +842,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) @@ -864,7 +864,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) @@ -879,7 +879,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, 1::int, -10)) @@ -894,7 +894,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1430100, 1, 0::bigint, 1::int, 10)) @@ -909,7 +909,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 10, 0::bigint, 1::int, 1500000)) @@ -924,7 +924,7 @@ BEGIN 
TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES ( 1420000, 1, 0::bigint, 123123123::int, 1500000)) @@ -952,7 +952,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -968,7 +968,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000)) @@ -983,7 +983,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -1024,7 +1024,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); citus_internal_update_relation_colocation --------------------------------------------------------------------- @@ -1041,7 +1041,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); ERROR: Node with group id 1014 for shard placement xxxxx does not exist @@ -1054,7 +1054,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); ERROR: Active placement for shard xxxxx is not found on group:14 @@ -1067,7 +1067,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 0 @@ -1080,7 +1080,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 213123123123 @@ -1093,7 +1093,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); ERROR: must be owner of table super_user_table @@ -1106,7 +1106,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -1115,7 +1115,7 @@ ERROR: must be owner of table super_user_table ROLLBACK; -- the user only allowed to delete shards in a 
distributed transaction BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -1130,7 +1130,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) @@ -1157,7 +1157,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) @@ -1191,7 +1191,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the repmodel -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' @@ -1206,7 +1206,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables colocated fails UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}' @@ -1221,7 +1221,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' @@ -1236,7 +1236,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' @@ -1254,7 +1254,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); citus_internal_add_partition_metadata @@ -1277,7 +1277,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); citus_internal_add_partition_metadata diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index d6d268016..bbf537000 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -2349,3 +2349,32 @@ WHERE logicalrelid = 'r1'::regclass; (1 row) DROP TABLE t1, r1; +-- Test rebalancer with index on a table +DROP TABLE IF EXISTS test_rebalance_with_index; +CREATE TABLE test_rebalance_with_index (measureid integer PRIMARY KEY); +SELECT create_distributed_table('test_rebalance_with_index', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE INDEX rebalance_with_index ON test_rebalance_with_index(measureid); +INSERT INTO test_rebalance_with_index VALUES(0); +INSERT INTO test_rebalance_with_index VALUES(1); +INSERT INTO 
test_rebalance_with_index VALUES(2); +SELECT * FROM master_drain_node('localhost', :worker_2_port); + master_drain_node +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +UPDATE pg_dist_node SET shouldhaveshards=true WHERE nodeport = :worker_2_port; +SELECT rebalance_table_shards(); + rebalance_table_shards +--------------------------------------------------------------------- + +(1 row) + +CALL citus_cleanup_orphaned_shards(); +DROP TABLE test_rebalance_with_index CASCADE; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 98ab53dac..7054c5414 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -28,7 +28,7 @@ ROLLBACK; -- but we are on the coordinator, so still not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; @@ -67,14 +67,14 @@ SET search_path TO metadata_sync_helpers; -- owner of the table test BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- we do not own the relation BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); ROLLBACK; @@ -83,7 +83,7 @@ CREATE TABLE test_2(col_1 int, col_2 int); CREATE TABLE test_3(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT count(*) FROM pg_dist_partition WHERE logicalrelid = 'metadata_sync_helpers.test_2'::regclass; ROLLBACK; @@ -91,42 +91,42 @@ ROLLBACK; -- fails because there is no X distribution method BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- fails because there is the column does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ROLLBACK; --- fails because we do not allow NULL parameters BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ROLLBACK; -- fails 
because colocationId cannot be negative BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ROLLBACK; -- fails because there is no X replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); @@ -135,7 +135,7 @@ ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); @@ -144,7 +144,7 @@ ROLLBACK; -- hash distributed table cannot have NULL distribution key BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ROLLBACK; @@ -165,14 +165,14 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- should throw error even if we skip the checks, there are no such nodes BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); ROLLBACK; @@ -189,7 +189,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; @@ -207,21 +207,21 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE test_ref(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 
'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ROLLBACK; -- non-valid replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ROLLBACK; -- not-matching replication model for reference table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ROLLBACK; @@ -231,7 +231,7 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE super_user_table(col_1 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); COMMIT; @@ -244,7 +244,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -254,7 +254,7 @@ ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -264,7 +264,7 @@ ROLLBACK; -- ok, now add the table to the pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); @@ -273,14 +273,14 @@ COMMIT; -- we can update to a non-existing colocation group (e.g., colocate_with:=none) BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); ROLLBACK; -- invalid shard ids are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH 
shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -290,7 +290,7 @@ ROLLBACK; -- invalid storage types are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -300,7 +300,7 @@ ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) @@ -310,7 +310,7 @@ ROLLBACK; -- non-integer shard ranges are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) @@ -320,7 +320,7 @@ ROLLBACK; -- shardMinValue should be smaller than shardMaxValue BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) @@ -330,7 +330,7 @@ ROLLBACK; -- we do not allow overlapping shards for the same table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), @@ -344,7 +344,7 @@ ROLLBACK; -- check with non-existing object type BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -354,7 +354,7 @@ ROLLBACK; -- check the sanity of distributionArgumentIndex and colocationId BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, 
force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) @@ -363,7 +363,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) @@ -373,7 +373,7 @@ ROLLBACK; -- check with non-existing object BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -384,7 +384,7 @@ ROLLBACK; -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) @@ -397,7 +397,7 @@ ROLLBACK; -- which is known how to distribute BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse CREATE TABLE publication_test_table(id int); @@ -412,7 +412,7 @@ ROLLBACK; -- Show that citus_internal_add_object_metadata checks the priviliges BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse CREATE FUNCTION distribution_test_function(int) RETURNS int @@ -430,7 +430,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse UPDATE pg_dist_partition SET partmethod = 'X'; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) @@ -444,7 +444,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) @@ -462,7 +462,7 @@ SET search_path TO metadata_sync_helpers; -- now, add few shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH 
shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text), @@ -478,14 +478,14 @@ COMMIT; -- we cannot mark these two tables colocated because they are not colocated BEGIN; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_3'::regclass, 1420009::bigint, 't'::"char", '21'::text, '30'::text), @@ -499,7 +499,7 @@ COMMIT; -- shardMin/MaxValues should be NULL for reference tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) @@ -509,7 +509,7 @@ ROLLBACK; -- reference tables cannot have multiple shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), @@ -520,7 +520,7 @@ ROLLBACK; -- finally, add a shard for reference tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) @@ -533,7 +533,7 @@ SET search_path TO metadata_sync_helpers; -- and a shard for the superuser table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) @@ -548,7 +548,7 @@ SET search_path TO metadata_sync_helpers; -- shard does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) @@ -558,7 +558,7 @@ ROLLBACK; -- invalid placementid BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 
15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, 1::int, -10)) @@ -568,7 +568,7 @@ ROLLBACK; -- non-existing shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1430100, 1, 0::bigint, 1::int, 10)) @@ -578,7 +578,7 @@ ROLLBACK; -- invalid shard state BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 10, 0::bigint, 1::int, 1500000)) @@ -588,7 +588,7 @@ ROLLBACK; -- non-existing node with non-existing node-id 123123123 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES ( 1420000, 1, 0::bigint, 123123123::int, 1500000)) @@ -612,7 +612,7 @@ END; $$ language plpgsql; -- fails because we ingest more placements for the same shards to the same worker node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -623,7 +623,7 @@ ROLLBACK; -- shard is not owned by us BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000)) @@ -633,7 +633,7 @@ ROLLBACK; -- sucessfully add placements BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -654,7 +654,7 @@ COMMIT; -- we should be able to colocate both tables now BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ROLLBACK; @@ -663,7 +663,7 @@ ROLLBACK; -- fails because we are trying to update it to non-existing node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; 
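    -- [editor's sketch, not part of this patch] The rename from 'citus' to
    -- 'citus_internal' (and the new 'citus_rebalancer' name) is observable
    -- outside these tests too; assuming default monitoring views, internal
    -- backends can be listed on any node with:
    SELECT pid, application_name
    FROM pg_stat_activity
    WHERE application_name IN ('citus_internal', 'citus_rebalancer');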
\set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); COMMIT; @@ -671,7 +671,7 @@ COMMIT; -- fails because the source node doesn't contain the shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); COMMIT; @@ -679,7 +679,7 @@ COMMIT; -- fails because shard does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); COMMIT; @@ -687,7 +687,7 @@ COMMIT; -- fails because none-existing shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); COMMIT; @@ -695,7 +695,7 @@ COMMIT; -- fails because we do not own the shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); COMMIT; @@ -703,7 +703,7 @@ COMMIT; -- the user only allowed to delete their own shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -712,7 +712,7 @@ ROLLBACK; -- the user only allowed to delete shards in a distributed transaction BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -722,7 +722,7 @@ ROLLBACK; -- the user cannot delete non-existing shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) @@ -737,7 +737,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT count(*) FROM pg_dist_placement WHERE shardid = 1420000; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) @@ -754,7 +754,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the repmodel -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' @@ -765,7 +765,7 @@ ROLLBACK; BEGIN 
TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables colocated fails UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}' @@ -775,7 +775,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' @@ -785,7 +785,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' @@ -799,7 +799,7 @@ CREATE TABLE test_6(int_col int, text_col text); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); @@ -815,7 +815,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; CREATE TABLE test_8(int_col int, text_col text COLLATE "caseinsensitive"); SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus'; + SET application_name to 'citus_internal'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index 612fd69c3..c9bad1983 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -1399,3 +1399,21 @@ WHERE logicalrelid = 'r1'::regclass; DROP TABLE t1, r1; +-- Test rebalancer with index on a table + +DROP TABLE IF EXISTS test_rebalance_with_index; +CREATE TABLE test_rebalance_with_index (measureid integer PRIMARY KEY); +SELECT create_distributed_table('test_rebalance_with_index', 'measureid'); +CREATE INDEX rebalance_with_index ON test_rebalance_with_index(measureid); + +INSERT INTO test_rebalance_with_index VALUES(0); +INSERT INTO test_rebalance_with_index VALUES(1); +INSERT INTO test_rebalance_with_index VALUES(2); + +SELECT * FROM master_drain_node('localhost', :worker_2_port); +CALL citus_cleanup_orphaned_shards(); +UPDATE pg_dist_node SET shouldhaveshards=true WHERE nodeport = :worker_2_port; + +SELECT rebalance_table_shards(); +CALL citus_cleanup_orphaned_shards(); +DROP TABLE test_rebalance_with_index CASCADE; From ff234fbfd267750730121077d3a4a05464cb2262 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Wed, 2 Feb 2022 17:22:10 +0100 Subject: [PATCH 10/28] Unify old GUCs into a single one 
Replaces citus.enable_object_propagation with citus.enable_metadata_sync Also, within the Citus 11 release cycle, we added citus.enable_metadata_sync_by_default, which is also replaced with citus.enable_metadata_sync. In essence, when citus.enable_metadata_sync is set to true, all objects and metadata are sent to the remote node. We strongly advise that users never change the value of this GUC. --- .../distributed/commands/dependencies.c | 6 ++-- src/backend/distributed/commands/extension.c | 2 +- src/backend/distributed/commands/function.c | 14 ++++---- src/backend/distributed/commands/sequence.c | 2 +- src/backend/distributed/metadata/dependency.c | 2 +- src/backend/distributed/metadata/distobject.c | 2 +- .../distributed/metadata/metadata_sync.c | 17 +++++----- .../distributed/metadata/node_metadata.c | 6 ++-- src/backend/distributed/shared_library_init.c | 17 ++-------- .../distributed/commands/utility_hook.h | 1 - src/include/distributed/metadata_sync.h | 6 ++-- src/test/regress/expected/check_mx.out | 4 +-- .../expected/disable_object_propagation.out | 6 ++-- .../distributed_collations_conflict.out | 2 ++ .../regress/expected/isolation_check_mx.out | 4 +-- .../expected/isolation_turn_mx_off.out | 2 +- .../expected/isolation_turn_mx_off_0.out | 2 +- .../regress/expected/isolation_turn_mx_on.out | 2 +- .../expected/isolation_turn_mx_on_0.out | 2 +- .../expected/multi_deparse_function.out | 32 +++++++++---------- .../expected/multi_deparse_procedure.out | 4 +-- src/test/regress/expected/multi_extension.out | 6 ++-- src/test/regress/expected/single_node.out | 6 ++-- src/test/regress/expected/turn_mx_off.out | 2 +- src/test/regress/expected/turn_mx_off_0.out | 2 +- src/test/regress/expected/turn_mx_off_1.out | 2 +- src/test/regress/expected/turn_mx_on.out | 2 +- src/test/regress/expected/turn_mx_on_0.out | 2 +- src/test/regress/expected/turn_mx_on_1.out | 2 +- src/test/regress/input/multi_copy.source | 4 +-- src/test/regress/multi_1_schedule | 5 +-- src/test/regress/output/multi_copy.source | 4 +-- src/test/regress/spec/isolation_check_mx.spec | 2 +- .../regress/spec/isolation_turn_mx_off.spec | 2 +- .../regress/spec/isolation_turn_mx_on.spec | 2 +- src/test/regress/sql/check_mx.sql | 2 +- .../sql/disable_object_propagation.sql | 6 ++-- .../sql/distributed_collations_conflict.sql | 4 +++ .../regress/sql/multi_deparse_function.sql | 32 +++++++++---------- .../regress/sql/multi_deparse_procedure.sql | 4 +-- src/test/regress/sql/multi_extension.sql | 6 ++-- src/test/regress/sql/single_node.sql | 6 ++-- src/test/regress/sql/turn_mx_off.sql | 2 +- src/test/regress/sql/turn_mx_on.sql | 2 +- 44 files changed, 116 insertions(+), 126 deletions(-) diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 9d9839b5a..ea1c59064 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -34,8 +34,6 @@ static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency); static List * FilterObjectAddressListByPredicate(List *objectAddressList, AddressPredicate predicate); -bool EnableDependencyCreation = true; - /* * EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes * sure these are available on all workers.
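/*
 * [editor's note, illustrative only -- not part of this patch]
 * The unified GUC described in the commit message above is a plain
 * PGC_USERSET boolean, so it can be toggled per session like any other
 * setting; a minimal psql sketch, assuming default settings ("pair" is a
 * hypothetical type, and the commit advises against changing the GUC
 * outside internal use):
 *
 *   SET citus.enable_metadata_sync TO off;  -- stop propagating objects/metadata
 *   CREATE TYPE pair AS (a int, b int);     -- stays local, not created on workers
 *   SET citus.enable_metadata_sync TO on;   -- restore propagation
 */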
If not available they will be created on the @@ -364,7 +362,7 @@ ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort) List *dependencies = GetDistributedObjectAddressList(); /* - * Depending on changes in the environment, such as the enable_object_propagation guc + * Depending on changes in the environment, such as the enable_metadata_sync guc * there might be objects in the distributed object address list that should currently * not be propagated by citus as they are 'not supported'. */ @@ -415,7 +413,7 @@ ShouldPropagate(void) return false; } - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { /* * we are configured to disable object propagation, should not propagate anything diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 88150942f..ef5b6c1b1 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -649,7 +649,7 @@ static bool ShouldPropagateExtensionCommand(Node *parseTree) { /* if we disabled object propagation, then we should not propagate anything. */ - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { return false; } diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index a1b618125..497a32dbb 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -200,8 +200,8 @@ create_distributed_function(PG_FUNCTION_ARGS) const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true); const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid); initStringInfo(&ddlCommand); - appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_OBJECT_PROPAGATION, - createFunctionSQL, alterFunctionOwnerSQL, ENABLE_OBJECT_PROPAGATION); + appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC, + createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC); SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data); MarkObjectDistributed(&functionAddress); @@ -698,7 +698,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, table_close(pgDistObjectRel, NoLock); - if (EnableDependencyCreation) + if (EnableMetadataSync) { List *objectAddressList = list_make1((ObjectAddress *) distAddress); List *distArgumentIndexList = NIL; @@ -1206,7 +1206,7 @@ ShouldPropagateCreateFunction(CreateFunctionStmt *stmt) return false; } - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { /* * we are configured to disable object propagation, should not propagate anything @@ -1254,7 +1254,7 @@ ShouldPropagateAlterFunction(const ObjectAddress *address) return false; } - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { /* * we are configured to disable object propagation, should not propagate anything @@ -1556,7 +1556,7 @@ PreprocessDropFunctionStmt(Node *node, const char *queryString, return NIL; } - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { /* * we are configured to disable object propagation, should not propagate anything @@ -1657,7 +1657,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString, return NIL; } - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { /* * we are configured to disable object propagation, should not propagate anything diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 4aa04b2ef..3638ab737 100644 --- a/src/backend/distributed/commands/sequence.c +++ 
b/src/backend/distributed/commands/sequence.c @@ -241,7 +241,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString, return NIL; } - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { /* * we are configured to disable object propagation, should not propagate anything diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index 820cb848f..a9a154242 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -558,7 +558,7 @@ IsObjectAddressCollected(ObjectAddress findAddress, bool SupportedDependencyByCitus(const ObjectAddress *address) { - if (!EnableDependencyCreation) + if (!EnableMetadataSync) { /* * If the user has disabled object propagation we need to fall back to the legacy diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index fc5d029fb..37aaa3aed 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -172,7 +172,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress) ereport(ERROR, (errmsg("failed to insert object into citus.pg_dist_object"))); } - if (EnableDependencyCreation) + if (EnableMetadataSync) { /* create a list by adding the address of value to not to have warning */ List *objectAddressList = list_make1((ObjectAddress *) distAddress); diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 988dedbc7..ec5ffb3f9 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -105,7 +105,7 @@ static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission); static RoleSpec * GetRoleSpecObjectForGrantStmt(Oid roleOid); static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem); -static void SetLocalEnableDependencyCreation(bool state); +static void SetLocalEnableMetadataSync(bool state); static void SetLocalReplicateReferenceTablesOnActivate(bool state); static char * GenerateSetRoleQuery(Oid roleOid); static void MetadataSyncSigTermHandler(SIGNAL_ARGS); @@ -417,7 +417,8 @@ ClusterHasKnownMetadataWorkers() bool ShouldSyncTableMetadata(Oid relationId) { - if (!OidIsValid(relationId) || !IsCitusTable(relationId)) + if (!EnableMetadataSync || + !OidIsValid(relationId) || !IsCitusTable(relationId)) { return false; } @@ -950,8 +951,8 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS) argsArray); /* First, disable propagation off to not to cause infinite propagation */ - bool prevDependencyCreationValue = EnableDependencyCreation; - SetLocalEnableDependencyCreation(false); + bool prevDependencyCreationValue = EnableMetadataSync; + SetLocalEnableMetadataSync(false); MarkObjectDistributed(&objectAddress); @@ -978,7 +979,7 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS) forceDelegationAddress); } - SetLocalEnableDependencyCreation(prevDependencyCreationValue); + SetLocalEnableMetadataSync(prevDependencyCreationValue); PG_RETURN_VOID(); } @@ -1847,12 +1848,12 @@ GetRoleSpecObjectForGrantStmt(Oid roleOid) /* - * SetLocalEnableDependencyCreation sets the enable_object_propagation locally + * SetLocalEnableMetadataSync sets the enable_metadata_sync locally */ static void -SetLocalEnableDependencyCreation(bool state) +SetLocalEnableMetadataSync(bool state) { - set_config_option("citus.enable_object_propagation", state == true ? 
"on" : "off", + set_config_option("citus.enable_metadata_sync", state == true ? "on" : "off", (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, GUC_ACTION_LOCAL, true, 0, false); } diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index d040514bc..a32e0aa20 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -77,7 +77,7 @@ bool ReplicateReferenceTablesOnActivate = true; /* did current transaction modify pg_dist_node? */ bool TransactionModifiedNodeMetadata = false; -bool EnableMetadataSyncByDefault = true; +bool EnableMetadataSync = true; typedef struct NodeMetadata { @@ -1095,9 +1095,9 @@ ActivateNode(char *nodeName, int nodePort) BoolGetDatum(isActive)); /* TODO: Once all tests will be enabled for MX, we can remove sync by default check */ - bool syncMetadata = EnableMetadataSyncByDefault && NodeIsPrimary(workerNode); + bool syncMetadata = EnableMetadataSync && NodeIsPrimary(workerNode); - if (syncMetadata && EnableDependencyCreation) + if (syncMetadata) { /* * We are going to sync the metadata anyway in this transaction, so do diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 5e3ab85d2..95250f19d 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -911,21 +911,10 @@ RegisterCitusConfigVariables(void) NULL, NULL, NULL); DefineCustomBoolVariable( - "citus.enable_metadata_sync_by_default", - gettext_noop("Enables MX in the new nodes by default"), + "citus.enable_metadata_sync", + gettext_noop("Enables object and metadata syncing."), NULL, - &EnableMetadataSyncByDefault, - true, - PGC_USERSET, - GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL, - NULL, NULL, NULL); - - DefineCustomBoolVariable( - "citus.enable_object_propagation", - gettext_noop("Enables propagating object creation for more complex objects, " - "schema's will always be created"), - NULL, - &EnableDependencyCreation, + &EnableMetadataSync, true, PGC_USERSET, GUC_NO_SHOW_ALL, diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 1ee18a206..7c926fcf1 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -32,7 +32,6 @@ typedef enum } PropSetCmdBehavior; extern PropSetCmdBehavior PropagateSetCommands; extern bool EnableDDLPropagation; -extern bool EnableDependencyCreation; extern bool EnableCreateTypePropagation; extern bool EnableAlterRolePropagation; extern bool EnableAlterRoleSetPropagation; diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 2ea790cbf..69d500da4 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -89,8 +89,8 @@ extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum); #define DISABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'off'" #define ENABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'on'" -#define DISABLE_OBJECT_PROPAGATION "SET citus.enable_object_propagation TO 'off'" -#define ENABLE_OBJECT_PROPAGATION "SET citus.enable_object_propagation TO 'on'" +#define DISABLE_METADATA_SYNC "SET citus.enable_metadata_sync TO 'off'" +#define ENABLE_METADATA_SYNC "SET citus.enable_metadata_sync TO 'on'" #define WORKER_APPLY_SEQUENCE_COMMAND "SELECT worker_apply_sequence_command (%s,%s)" #define UPSERT_PLACEMENT \ 
"INSERT INTO pg_dist_placement " \ @@ -108,6 +108,6 @@ extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum); /* controlled via GUC */ extern char *EnableManualMetadataChangesForUser; -extern bool EnableMetadataSyncByDefault; +extern bool EnableMetadataSync; #endif /* METADATA_SYNC_H */ diff --git a/src/test/regress/expected/check_mx.out b/src/test/regress/expected/check_mx.out index 7e5bc23f4..6a030bc31 100644 --- a/src/test/regress/expected/check_mx.out +++ b/src/test/regress/expected/check_mx.out @@ -1,5 +1,5 @@ -SHOW citus.enable_metadata_sync_by_default; - citus.enable_metadata_sync_by_default +SHOW citus.enable_metadata_sync; + citus.enable_metadata_sync --------------------------------------------------------------------- on (1 row) diff --git a/src/test/regress/expected/disable_object_propagation.out b/src/test/regress/expected/disable_object_propagation.out index 78247223c..8429fe301 100644 --- a/src/test/regress/expected/disable_object_propagation.out +++ b/src/test/regress/expected/disable_object_propagation.out @@ -1,5 +1,5 @@ SET citus.next_shard_id TO 20030000; -SET citus.enable_object_propagation TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically +SET citus.enable_metadata_sync TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically CREATE USER typeowner_for_disabled_object_propagation_guc; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. @@ -65,7 +65,7 @@ SELECT create_distributed_table('t3', 'a'); -- verify ALTER TYPE statements are not propagated for types, even though they are marked distributed BEGIN; -- object propagation is turned off after xact finished, type is already marked distributed by then -SET LOCAL citus.enable_object_propagation TO on; +SET LOCAL citus.enable_metadata_sync TO on; CREATE TYPE tt3 AS (a int, b int); CREATE TABLE t4 (a int PRIMARY KEY, b tt3); SELECT create_distributed_table('t4','a'); @@ -120,7 +120,7 @@ $$); -- suppress any warnings during cleanup SET client_min_messages TO error; -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; DROP SCHEMA disabled_object_propagation CASCADE; DROP SCHEMA disabled_object_propagation2 CASCADE; DROP USER typeowner_for_disabled_object_propagation_guc; diff --git a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out index 8643ae290..821521457 100644 --- a/src/test/regress/expected/distributed_collations_conflict.out +++ b/src/test/regress/expected/distributed_collations_conflict.out @@ -8,6 +8,7 @@ SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); \c - - - :worker_1_port SET search_path TO collation_conflict; +SET citus.enable_metadata_sync TO off; CREATE COLLATION caseinsensitive ( provider = icu, locale = 'und-u-ks-level2' @@ -45,6 +46,7 @@ SET search_path TO collation_conflict; DROP TABLE tblcoll; DROP COLLATION caseinsensitive; \c - - - :worker_1_port +SET citus.enable_metadata_sync TO off; SET search_path TO collation_conflict; CREATE COLLATION caseinsensitive ( provider = icu, diff --git a/src/test/regress/expected/isolation_check_mx.out b/src/test/regress/expected/isolation_check_mx.out index c6d9f58ea..25f65a0f8 100644 --- a/src/test/regress/expected/isolation_check_mx.out +++ b/src/test/regress/expected/isolation_check_mx.out @@ -2,10 +2,10 @@ Parsed test spec with 1 
sessions starting permutation: check_mx step check_mx: - SHOW citus.enable_metadata_sync_by_default; + SHOW citus.enable_metadata_sync; SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary'; -citus.enable_metadata_sync_by_default +citus.enable_metadata_sync --------------------------------------------------------------------- on (1 row) diff --git a/src/test/regress/expected/isolation_turn_mx_off.out b/src/test/regress/expected/isolation_turn_mx_off.out index d12211562..4c004c2fa 100644 --- a/src/test/regress/expected/isolation_turn_mx_off.out +++ b/src/test/regress/expected/isolation_turn_mx_off.out @@ -2,7 +2,7 @@ Parsed test spec with 1 sessions starting permutation: disable-mx-by-default reload stop-metadata-sync step disable-mx-by-default: - ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF; + ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; step reload: SELECT pg_reload_conf(); diff --git a/src/test/regress/expected/isolation_turn_mx_off_0.out b/src/test/regress/expected/isolation_turn_mx_off_0.out index 763e806fb..bb41b2412 100644 --- a/src/test/regress/expected/isolation_turn_mx_off_0.out +++ b/src/test/regress/expected/isolation_turn_mx_off_0.out @@ -2,7 +2,7 @@ Parsed test spec with 1 sessions starting permutation: disable-mx-by-default reload stop-metadata-sync step disable-mx-by-default: - ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF; + ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; step reload: SELECT pg_reload_conf(); diff --git a/src/test/regress/expected/isolation_turn_mx_on.out b/src/test/regress/expected/isolation_turn_mx_on.out index 27a855dc4..8f65d92bd 100644 --- a/src/test/regress/expected/isolation_turn_mx_on.out +++ b/src/test/regress/expected/isolation_turn_mx_on.out @@ -2,7 +2,7 @@ Parsed test spec with 1 sessions starting permutation: enable-mx-by-default reload start-metadata-sync step enable-mx-by-default: - ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON; + ALTER SYSTEM SET citus.enable_metadata_sync TO ON; step reload: SELECT pg_reload_conf(); diff --git a/src/test/regress/expected/isolation_turn_mx_on_0.out b/src/test/regress/expected/isolation_turn_mx_on_0.out index 4d7889487..bf173e1ab 100644 --- a/src/test/regress/expected/isolation_turn_mx_on_0.out +++ b/src/test/regress/expected/isolation_turn_mx_on_0.out @@ -2,7 +2,7 @@ Parsed test spec with 1 sessions starting permutation: enable-mx-by-default reload start-metadata-sync step enable-mx-by-default: - ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON; + ALTER SYSTEM SET citus.enable_metadata_sync TO ON; step reload: SELECT pg_reload_conf(); diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index 31779bbd1..cdf002e8e 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -64,14 +64,14 @@ CREATE FUNCTION add(integer, integer) RETURNS integer -- Since deparse logic on workers can not work for if function -- is distributed on workers, we are disabling object propagation -- first. Same trick has been applied multiple times in this test. 
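-- As a sketch, the pattern (using the GUC renamed by this patch, which now
-- also gates metadata syncing) is:
--
--   SET citus.enable_metadata_sync TO OFF;
--   SELECT create_distributed_function('add(int,int)');
--   RESET citus.enable_metadata_sync;
--
-- The hunks below apply exactly this rename to each occurrence.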
-SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('add(int,int)'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION add CALLED ON NULL INPUT $cmd$); @@ -540,7 +540,7 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() RETURNS TEXT CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT AS $$ SELECT 'Overloaded function called with param: ' || $1 $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()'); create_distributed_function --------------------------------------------------------------------- @@ -553,7 +553,7 @@ SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)'); (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2" $cmd$); @@ -581,14 +581,14 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT AS $$ SELECT 'supplied param is : ' || param; $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_default_param(INT)'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_default_param RENAME TO func_with_default_param; $cmd$); @@ -604,14 +604,14 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT) AS $$ SELECT 'supplied param is : ' || param; $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_out_param(INT)'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param; $cmd$); @@ -630,14 +630,14 @@ BEGIN a := a * a; END; $$ LANGUAGE plpgsql; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('square(NUMERIC)'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION square SET search_path TO DEFAULT; $cmd$); @@ -663,14 +663,14 @@ BEGIN FROM generate_subscripts(list, 1) g(i); END; $$ LANGUAGE plpgsql; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('sum_avg(NUMERIC[])'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION sum_avg COST 10000; $cmd$); @@ -689,14 +689,14 @@ RESET 
citus.enable_ddl_propagation; CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT) AS $$ SELECT param.x + param.y $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_custom_param(intpair)'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param; $cmd$); @@ -713,14 +713,14 @@ CREATE FUNCTION func_returns_table(IN count INT) RETURNS TABLE (x INT, y INT) AS $$ SELECT i,i FROM generate_series(1,count) i $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_returns_table(INT)'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_returns_table ROWS 100; $cmd$); diff --git a/src/test/regress/expected/multi_deparse_procedure.out b/src/test/regress/expected/multi_deparse_procedure.out index d510a8642..62adca50b 100644 --- a/src/test/regress/expected/multi_deparse_procedure.out +++ b/src/test/regress/expected/multi_deparse_procedure.out @@ -49,14 +49,14 @@ BEGIN RAISE INFO 'information message %', $1; END; $proc$; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('raise_info(text)'); create_distributed_function --------------------------------------------------------------------- (1 row) -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info CALLED ON NULL INPUT $cmd$); diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 7caceeeda..70dc4c2a0 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -105,7 +105,7 @@ ORDER BY 1, 2; DROP EXTENSION citus; \c -- these tests switch between citus versions and call ddl's that require pg_dist_object to be created -SET citus.enable_object_propagation TO 'false'; +SET citus.enable_metadata_sync TO 'false'; SET citus.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; ALTER EXTENSION citus UPDATE TO '8.0-2'; @@ -492,7 +492,7 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Test downgrade to 9.4-1 from 9.5-1 ALTER EXTENSION citus UPDATE TO '9.5-1'; BEGIN; - SET citus.enable_object_propagation TO on; + SET citus.enable_metadata_sync TO on; SELECT master_add_node('localhost', :master_port, groupId=>0); NOTICE: localhost:xxxxx is the coordinator and already contains metadata, skipping syncing the metadata master_add_node @@ -508,7 +508,7 @@ NOTICE: create_citus_local_table is deprecated in favour of citus_add_local_tab (1 row) - RESET citus.enable_object_propagation; + RESET citus.enable_metadata_sync; -- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table ALTER EXTENSION citus UPDATE TO '9.4-1'; ERROR: citus local tables are introduced in Citus 9.5 diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index 6b92e7bba..0f94e6e7b 100644 --- a/src/test/regress/expected/single_node.out +++ 
b/src/test/regress/expected/single_node.out @@ -186,7 +186,7 @@ BEGIN; -- it'd spawn a bg worker targeting this node -- and that changes the connection count specific tests -- here - SET LOCAL citus.enable_metadata_sync_by_default TO OFF; + SET LOCAL citus.enable_metadata_sync TO OFF; -- cannot add workers with specific IP as long as I have a placeholder coordinator record SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port); ERROR: cannot add a worker node when the coordinator hostname is set to localhost @@ -198,7 +198,7 @@ BEGIN; -- it'd spawn a bg worker targeting this node -- and that changes the connection count specific tests -- here - SET LOCAL citus.enable_metadata_sync_by_default TO OFF; + SET LOCAL citus.enable_metadata_sync TO OFF; -- adding localhost workers is ok SELECT 1 FROM master_add_node('localhost', :worker_1_port); NOTICE: shards are still on the coordinator after adding the new node @@ -228,7 +228,7 @@ BEGIN; -- it'd spawn a bg worker targeting this node -- and that changes the connection count specific tests -- here - SET LOCAL citus.enable_metadata_sync_by_default TO OFF; + SET LOCAL citus.enable_metadata_sync TO OFF; -- adding workers with specific IP is ok now SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port); NOTICE: shards are still on the coordinator after adding the new node diff --git a/src/test/regress/expected/turn_mx_off.out b/src/test/regress/expected/turn_mx_off.out index 01331dd55..4dd6d592e 100644 --- a/src/test/regress/expected/turn_mx_off.out +++ b/src/test/regress/expected/turn_mx_off.out @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF; +ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; SELECT pg_reload_conf(); pg_reload_conf --------------------------------------------------------------------- diff --git a/src/test/regress/expected/turn_mx_off_0.out b/src/test/regress/expected/turn_mx_off_0.out index 4fbcb04dd..ac71ba668 100644 --- a/src/test/regress/expected/turn_mx_off_0.out +++ b/src/test/regress/expected/turn_mx_off_0.out @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF; +ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; SELECT pg_reload_conf(); pg_reload_conf --------------------------------------------------------------------- diff --git a/src/test/regress/expected/turn_mx_off_1.out b/src/test/regress/expected/turn_mx_off_1.out index 7497f24c6..1f1b9c071 100644 --- a/src/test/regress/expected/turn_mx_off_1.out +++ b/src/test/regress/expected/turn_mx_off_1.out @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF; +ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; SELECT pg_reload_conf(); pg_reload_conf --------------------------------------------------------------------- diff --git a/src/test/regress/expected/turn_mx_on.out b/src/test/regress/expected/turn_mx_on.out index f6e9c6856..9b7ac5028 100644 --- a/src/test/regress/expected/turn_mx_on.out +++ b/src/test/regress/expected/turn_mx_on.out @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON; +ALTER SYSTEM SET citus.enable_metadata_sync TO ON; SELECT pg_reload_conf(); pg_reload_conf --------------------------------------------------------------------- diff --git a/src/test/regress/expected/turn_mx_on_0.out b/src/test/regress/expected/turn_mx_on_0.out index ea67ce573..ab6e15a07 100644 --- a/src/test/regress/expected/turn_mx_on_0.out +++ b/src/test/regress/expected/turn_mx_on_0.out @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON; 
+ALTER SYSTEM SET citus.enable_metadata_sync TO ON; SELECT pg_reload_conf(); pg_reload_conf --------------------------------------------------------------------- diff --git a/src/test/regress/expected/turn_mx_on_1.out b/src/test/regress/expected/turn_mx_on_1.out index 21d1ecfd9..0936b86a6 100644 --- a/src/test/regress/expected/turn_mx_on_1.out +++ b/src/test/regress/expected/turn_mx_on_1.out @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON; +ALTER SYSTEM SET citus.enable_metadata_sync TO ON; SELECT pg_reload_conf(); pg_reload_conf --------------------------------------------------------------------- diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index 79075482a..0b384c047 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -582,7 +582,7 @@ SET session_replication_role = DEFAULT; -- disable test_user on the first worker \c - :default_user - :worker_1_port -SET citus.enable_object_propagation TO off; +SET citus.enable_metadata_sync TO off; ALTER USER test_user WITH nologin; \c - test_user - :master_port @@ -616,7 +616,7 @@ SELECT shardid, shardstate, nodename, nodeport -- re-enable test_user on the first worker \c - :default_user - :worker_1_port -SET citus.enable_object_propagation TO off; +SET citus.enable_metadata_sync TO off; ALTER USER test_user WITH login; \c - test_user - :master_port diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index e909f8c7a..db44baa61 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -319,11 +319,8 @@ test: check_mx test: distributed_functions distributed_functions_conflict test: distributed_collations test: distributed_procedure - -# blocked on #5583 -test: turn_mx_off test: distributed_collations_conflict -test: turn_mx_on +test: check_mx # --------- # deparsing logic tests diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index 0788cf3c9..2bd0c7b77 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -745,7 +745,7 @@ UPDATE pg_dist_shard_placement SET nodeport = :worker_1_port+10 WHERE shardid = SET session_replication_role = DEFAULT; -- disable test_user on the first worker \c - :default_user - :worker_1_port -SET citus.enable_object_propagation TO off; +SET citus.enable_metadata_sync TO off; ALTER USER test_user WITH nologin; \c - test_user - :master_port -- reissue copy, and it should fail @@ -804,7 +804,7 @@ SELECT shardid, shardstate, nodename, nodeport -- re-enable test_user on the first worker \c - :default_user - :worker_1_port -SET citus.enable_object_propagation TO off; +SET citus.enable_metadata_sync TO off; ALTER USER test_user WITH login; \c - test_user - :master_port DROP TABLE numbers_hash; diff --git a/src/test/regress/spec/isolation_check_mx.spec b/src/test/regress/spec/isolation_check_mx.spec index 8958b92e8..f3593ff0b 100644 --- a/src/test/regress/spec/isolation_check_mx.spec +++ b/src/test/regress/spec/isolation_check_mx.spec @@ -2,7 +2,7 @@ session "s1" step "check_mx" { - SHOW citus.enable_metadata_sync_by_default; + SHOW citus.enable_metadata_sync; SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary'; } diff --git a/src/test/regress/spec/isolation_turn_mx_off.spec b/src/test/regress/spec/isolation_turn_mx_off.spec index 7df0fc538..f80fc0a1e 100644 --- a/src/test/regress/spec/isolation_turn_mx_off.spec +++ 
b/src/test/regress/spec/isolation_turn_mx_off.spec @@ -2,7 +2,7 @@ session "s1" step "disable-mx-by-default" { - ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF; + ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; } step "reload" diff --git a/src/test/regress/spec/isolation_turn_mx_on.spec b/src/test/regress/spec/isolation_turn_mx_on.spec index ebc49705f..5e35c13e5 100644 --- a/src/test/regress/spec/isolation_turn_mx_on.spec +++ b/src/test/regress/spec/isolation_turn_mx_on.spec @@ -2,7 +2,7 @@ session "s1" step "enable-mx-by-default" { - ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON; + ALTER SYSTEM SET citus.enable_metadata_sync TO ON; } step "reload" diff --git a/src/test/regress/sql/check_mx.sql b/src/test/regress/sql/check_mx.sql index 7922905fc..6c9b2b664 100644 --- a/src/test/regress/sql/check_mx.sql +++ b/src/test/regress/sql/check_mx.sql @@ -1,3 +1,3 @@ -SHOW citus.enable_metadata_sync_by_default; +SHOW citus.enable_metadata_sync; SELECT bool_and(metadatasynced) FROM pg_dist_node WHERE noderole = 'primary'; diff --git a/src/test/regress/sql/disable_object_propagation.sql b/src/test/regress/sql/disable_object_propagation.sql index a50521860..431d56d61 100644 --- a/src/test/regress/sql/disable_object_propagation.sql +++ b/src/test/regress/sql/disable_object_propagation.sql @@ -1,5 +1,5 @@ SET citus.next_shard_id TO 20030000; -SET citus.enable_object_propagation TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically +SET citus.enable_metadata_sync TO false; -- all tests here verify old behaviour without distributing types,functions,etc automatically CREATE USER typeowner_for_disabled_object_propagation_guc; CREATE SCHEMA disabled_object_propagation; @@ -37,7 +37,7 @@ SELECT create_distributed_table('t3', 'a'); -- verify ALTER TYPE statements are not propagated for types, even though they are marked distributed BEGIN; -- object propagation is turned off after xact finished, type is already marked distributed by then -SET LOCAL citus.enable_object_propagation TO on; +SET LOCAL citus.enable_metadata_sync TO on; CREATE TYPE tt3 AS (a int, b int); CREATE TABLE t4 (a int PRIMARY KEY, b tt3); SELECT create_distributed_table('t4','a'); @@ -75,7 +75,7 @@ $$); -- suppress any warnings during cleanup SET client_min_messages TO error; -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; DROP SCHEMA disabled_object_propagation CASCADE; DROP SCHEMA disabled_object_propagation2 CASCADE; DROP USER typeowner_for_disabled_object_propagation_guc; diff --git a/src/test/regress/sql/distributed_collations_conflict.sql b/src/test/regress/sql/distributed_collations_conflict.sql index 5f3063651..c84e241a4 100644 --- a/src/test/regress/sql/distributed_collations_conflict.sql +++ b/src/test/regress/sql/distributed_collations_conflict.sql @@ -4,6 +4,8 @@ SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); \c - - - :worker_1_port SET search_path TO collation_conflict; +SET citus.enable_metadata_sync TO off; + CREATE COLLATION caseinsensitive ( provider = icu, locale = 'und-u-ks-level2' @@ -36,6 +38,8 @@ DROP TABLE tblcoll; DROP COLLATION caseinsensitive; \c - - - :worker_1_port +SET citus.enable_metadata_sync TO off; + SET search_path TO collation_conflict; CREATE COLLATION caseinsensitive ( diff --git a/src/test/regress/sql/multi_deparse_function.sql b/src/test/regress/sql/multi_deparse_function.sql index 2dd0801e6..ba823f669 100644 --- a/src/test/regress/sql/multi_deparse_function.sql +++ 
b/src/test/regress/sql/multi_deparse_function.sql @@ -70,9 +70,9 @@ CREATE FUNCTION add(integer, integer) RETURNS integer -- Since deparse logic on workers can not work for if function -- is distributed on workers, we are disabling object propagation -- first. Same trick has been applied multiple times in this test. -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('add(int,int)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION add CALLED ON NULL INPUT @@ -276,10 +276,10 @@ CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text) RETURNS TEXT AS $$ SELECT 'Overloaded function called with param: ' || $1 $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"()'); SELECT create_distributed_function('"CiTuS.TeeN"."TeeNFunCT10N.1!?!"(text)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() SET SCHEMA "CiTUS.TEEN2" @@ -294,9 +294,9 @@ $cmd$); CREATE FUNCTION func_default_param(param INT DEFAULT 0) RETURNS TEXT AS $$ SELECT 'supplied param is : ' || param; $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_default_param(INT)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_default_param RENAME TO func_with_default_param; @@ -306,9 +306,9 @@ $cmd$); CREATE FUNCTION func_out_param(IN param INT, OUT result TEXT) AS $$ SELECT 'supplied param is : ' || param; $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_out_param(INT)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_out_param RENAME TO func_in_and_out_param; @@ -321,9 +321,9 @@ BEGIN a := a * a; END; $$ LANGUAGE plpgsql; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('square(NUMERIC)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION square SET search_path TO DEFAULT; @@ -343,9 +343,9 @@ BEGIN FROM generate_subscripts(list, 1) g(i); END; $$ LANGUAGE plpgsql; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('sum_avg(NUMERIC[])'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION sum_avg COST 10000; @@ -358,9 +358,9 @@ RESET citus.enable_ddl_propagation; CREATE FUNCTION func_custom_param(IN param intpair, OUT total INT) AS $$ SELECT param.x + param.y $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_custom_param(intpair)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param; @@ -372,9 +372,9 @@ CREATE FUNCTION func_returns_table(IN count INT) RETURNS TABLE (x INT, y INT) AS $$ SELECT i,i FROM 
generate_series(1,count) i $$ LANGUAGE SQL; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_returns_table(INT)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_returns_table ROWS 100; diff --git a/src/test/regress/sql/multi_deparse_procedure.sql b/src/test/regress/sql/multi_deparse_procedure.sql index 6941e749b..2f582ed00 100644 --- a/src/test/regress/sql/multi_deparse_procedure.sql +++ b/src/test/regress/sql/multi_deparse_procedure.sql @@ -56,9 +56,9 @@ BEGIN RAISE INFO 'information message %', $1; END; $proc$; -SET citus.enable_object_propagation TO OFF; +SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('raise_info(text)'); -RESET citus.enable_object_propagation; +RESET citus.enable_metadata_sync; SELECT deparse_and_run_on_workers($cmd$ ALTER PROCEDURE raise_info CALLED ON NULL INPUT diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 833fa35bd..43f24fb10 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -101,7 +101,7 @@ DROP EXTENSION citus; \c -- these tests switch between citus versions and call ddl's that require pg_dist_object to be created -SET citus.enable_object_propagation TO 'false'; +SET citus.enable_metadata_sync TO 'false'; SET citus.enable_version_checks TO 'false'; @@ -212,11 +212,11 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '9.5-1'; BEGIN; - SET citus.enable_object_propagation TO on; + SET citus.enable_metadata_sync TO on; SELECT master_add_node('localhost', :master_port, groupId=>0); CREATE TABLE citus_local_table (a int); SELECT create_citus_local_table('citus_local_table'); - RESET citus.enable_object_propagation; + RESET citus.enable_metadata_sync; -- downgrade from 9.5-1 to 9.4-1 should fail as we have a citus local table ALTER EXTENSION citus UPDATE TO '9.4-1'; diff --git a/src/test/regress/sql/single_node.sql b/src/test/regress/sql/single_node.sql index 5aba05770..13659f2eb 100644 --- a/src/test/regress/sql/single_node.sql +++ b/src/test/regress/sql/single_node.sql @@ -101,7 +101,7 @@ BEGIN; -- it'd spawn a bg worker targeting this node -- and that changes the connection count specific tests -- here - SET LOCAL citus.enable_metadata_sync_by_default TO OFF; + SET LOCAL citus.enable_metadata_sync TO OFF; -- cannot add workers with specific IP as long as I have a placeholder coordinator record SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port); COMMIT; @@ -111,7 +111,7 @@ BEGIN; -- it'd spawn a bg worker targeting this node -- and that changes the connection count specific tests -- here - SET LOCAL citus.enable_metadata_sync_by_default TO OFF; + SET LOCAL citus.enable_metadata_sync TO OFF; -- adding localhost workers is ok SELECT 1 FROM master_add_node('localhost', :worker_1_port); COMMIT; @@ -127,7 +127,7 @@ BEGIN; -- it'd spawn a bg worker targeting this node -- and that changes the connection count specific tests -- here - SET LOCAL citus.enable_metadata_sync_by_default TO OFF; + SET LOCAL citus.enable_metadata_sync TO OFF; -- adding workers with specific IP is ok now SELECT 1 FROM master_add_node('127.0.0.1', :worker_1_port); COMMIT; diff --git a/src/test/regress/sql/turn_mx_off.sql b/src/test/regress/sql/turn_mx_off.sql index 1a34d7ab8..f1fc21536 100644 --- a/src/test/regress/sql/turn_mx_off.sql +++ 
b/src/test/regress/sql/turn_mx_off.sql @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO OFF; +ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; SELECT pg_reload_conf(); SET client_min_messages TO ERROR; diff --git a/src/test/regress/sql/turn_mx_on.sql b/src/test/regress/sql/turn_mx_on.sql index 67b548043..b471728f6 100644 --- a/src/test/regress/sql/turn_mx_on.sql +++ b/src/test/regress/sql/turn_mx_on.sql @@ -1,4 +1,4 @@ -ALTER SYSTEM SET citus.enable_metadata_sync_by_default TO ON; +ALTER SYSTEM SET citus.enable_metadata_sync TO ON; SELECT pg_reload_conf(); SELECT pg_sleep(0.1); From bcb00e33189f454169a5a42d2102c70c37deac96 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Thu, 3 Feb 2022 10:13:05 +0100 Subject: [PATCH 11/28] Remove unused files --- src/test/regress/sql/turn_mx_off.sql | 5 ----- src/test/regress/sql/turn_mx_on.sql | 6 ------ 2 files changed, 11 deletions(-) delete mode 100644 src/test/regress/sql/turn_mx_off.sql delete mode 100644 src/test/regress/sql/turn_mx_on.sql diff --git a/src/test/regress/sql/turn_mx_off.sql b/src/test/regress/sql/turn_mx_off.sql deleted file mode 100644 index f1fc21536..000000000 --- a/src/test/regress/sql/turn_mx_off.sql +++ /dev/null @@ -1,5 +0,0 @@ -ALTER SYSTEM SET citus.enable_metadata_sync TO OFF; -SELECT pg_reload_conf(); - -SET client_min_messages TO ERROR; -SELECT stop_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; diff --git a/src/test/regress/sql/turn_mx_on.sql b/src/test/regress/sql/turn_mx_on.sql deleted file mode 100644 index b471728f6..000000000 --- a/src/test/regress/sql/turn_mx_on.sql +++ /dev/null @@ -1,6 +0,0 @@ -ALTER SYSTEM SET citus.enable_metadata_sync TO ON; -SELECT pg_reload_conf(); -SELECT pg_sleep(0.1); - -SET client_min_messages TO ERROR; -SELECT start_metadata_sync_to_node(nodename, nodeport) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; From 923bb194a47ab3880acb4f06dc61b36c7cf4ef19 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Thu, 3 Feb 2022 11:17:11 +0100 Subject: [PATCH 12/28] Move isolation_multiuser_locking to MX tests --- .../expected/isolation_multiuser_locking.out | 240 ++++++++++-------- ...licate_reference_tables_to_coordinator.out | 4 +- src/test/regress/isolation_schedule | 2 +- .../spec/isolation_multiuser_locking.spec | 6 + 4 files changed, 150 insertions(+), 102 deletions(-) diff --git a/src/test/regress/expected/isolation_multiuser_locking.out b/src/test/regress/expected/isolation_multiuser_locking.out index a667b5dfb..30f474b84 100644 --- a/src/test/regress/expected/isolation_multiuser_locking.out +++ b/src/test/regress/expected/isolation_multiuser_locking.out @@ -2,295 +2,337 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s2-reindex s1-insert s2-commit s1-commit step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s2-reindex: - REINDEX TABLE test_table; + REINDEX TABLE test_table; ERROR: must be owner of table test_table step s1-insert: - UPDATE test_table SET column2 = 1; + UPDATE test_table SET column2 = 1; step s2-commit: - COMMIT; + COMMIT; step s1-commit: - COMMIT; + COMMIT; starting permutation: s1-grant s1-begin s2-begin s2-reindex s1-insert s2-insert s2-commit s1-commit step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON
test_table TO test_user_2; + SET ROLE test_user_1; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); + GRANT ALL ON test_table TO test_user_2; + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); bool_and --------------------------------------------------------------------- t (1 row) +run_command_on_workers +--------------------------------------------------------------------- +(localhost,57637,t,SET) +(localhost,57638,t,SET) +(2 rows) + step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s2-reindex: - REINDEX TABLE test_table; + REINDEX TABLE test_table; ERROR: must be owner of table test_table step s1-insert: - UPDATE test_table SET column2 = 1; + UPDATE test_table SET column2 = 1; step s2-insert: - UPDATE test_table SET column2 = 2; + UPDATE test_table SET column2 = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block step s2-commit: - COMMIT; + COMMIT; step s1-commit: - COMMIT; + COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-reindex s2-insert s1-insert s1-commit s2-commit step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; + SET ROLE test_user_1; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); + GRANT ALL ON test_table TO test_user_2; + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); bool_and --------------------------------------------------------------------- t (1 row) +run_command_on_workers +--------------------------------------------------------------------- +(localhost,57637,t,SET) +(localhost,57638,t,SET) +(2 rows) + step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s1-reindex: - REINDEX TABLE test_table; + REINDEX TABLE test_table; step s2-insert: - UPDATE test_table SET column2 = 2; + UPDATE test_table SET column2 = 2; step s1-insert: - UPDATE test_table SET column2 = 1; + UPDATE test_table SET column2 = 1; step s1-commit: - COMMIT; + COMMIT; step s2-insert: <... 
completed> step s2-commit: - COMMIT; + COMMIT; starting permutation: s1-begin s2-begin s2-index s1-insert s2-commit s1-commit s2-drop-index step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s2-index: - CREATE INDEX test_index ON test_table(column1); + CREATE INDEX test_index ON test_table(column1); ERROR: must be owner of table test_table step s1-insert: - UPDATE test_table SET column2 = 1; + UPDATE test_table SET column2 = 1; step s2-commit: - COMMIT; + COMMIT; step s1-commit: - COMMIT; + COMMIT; step s2-drop-index: - DROP INDEX IF EXISTS test_index; + DROP INDEX IF EXISTS test_index; starting permutation: s1-grant s1-begin s2-begin s2-insert s1-index s2-insert s2-commit s1-commit s1-drop-index step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; + SET ROLE test_user_1; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); + GRANT ALL ON test_table TO test_user_2; + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); bool_and --------------------------------------------------------------------- t (1 row) +run_command_on_workers +--------------------------------------------------------------------- +(localhost,57637,t,SET) +(localhost,57638,t,SET) +(2 rows) + step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s2-insert: - UPDATE test_table SET column2 = 2; + UPDATE test_table SET column2 = 2; step s1-index: - CREATE INDEX test_index ON test_table(column1); + CREATE INDEX test_index ON test_table(column1); step s2-insert: - UPDATE test_table SET column2 = 2; + UPDATE test_table SET column2 = 2; step s2-commit: - COMMIT; + COMMIT; step s1-index: <... 
completed> step s1-commit: - COMMIT; + COMMIT; step s1-drop-index: - DROP INDEX IF EXISTS test_index; + DROP INDEX IF EXISTS test_index; starting permutation: s1-grant s1-begin s2-begin s1-index s2-index s1-insert s1-commit s2-commit s1-drop-index s2-drop-index step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; + SET ROLE test_user_1; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); + GRANT ALL ON test_table TO test_user_2; + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); bool_and --------------------------------------------------------------------- t (1 row) +run_command_on_workers +--------------------------------------------------------------------- +(localhost,57637,t,SET) +(localhost,57638,t,SET) +(2 rows) + step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s1-index: - CREATE INDEX test_index ON test_table(column1); + CREATE INDEX test_index ON test_table(column1); step s2-index: - CREATE INDEX test_index ON test_table(column1); + CREATE INDEX test_index ON test_table(column1); ERROR: must be owner of table test_table step s1-insert: - UPDATE test_table SET column2 = 1; + UPDATE test_table SET column2 = 1; step s1-commit: - COMMIT; + COMMIT; step s2-commit: - COMMIT; + COMMIT; step s1-drop-index: - DROP INDEX IF EXISTS test_index; + DROP INDEX IF EXISTS test_index; step s2-drop-index: - DROP INDEX IF EXISTS test_index; + DROP INDEX IF EXISTS test_index; starting permutation: s1-begin s2-begin s2-truncate s1-insert s2-commit s1-commit step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s2-truncate: - TRUNCATE test_table; + TRUNCATE test_table; ERROR: permission denied for table test_table step s1-insert: - UPDATE test_table SET column2 = 1; + UPDATE test_table SET column2 = 1; step s2-commit: - COMMIT; + COMMIT; step s1-commit: - COMMIT; + COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-insert s1-insert s1-commit s2-commit step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; + SET ROLE test_user_1; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); + GRANT ALL ON test_table TO test_user_2; + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); bool_and --------------------------------------------------------------------- t (1 row) +run_command_on_workers +--------------------------------------------------------------------- +(localhost,57637,t,SET) +(localhost,57638,t,SET) +(2 rows) + step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s1-truncate: - TRUNCATE test_table; + TRUNCATE test_table; step s2-insert: - UPDATE test_table SET column2 = 2; + UPDATE test_table SET column2 = 2; step s1-insert: - UPDATE test_table SET column2 = 1; + UPDATE test_table SET column2 = 1; step s1-commit: - 
COMMIT; + COMMIT; step s2-insert: <... completed> step s2-commit: - COMMIT; + COMMIT; starting permutation: s1-grant s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit step s1-grant: - SET ROLE test_user_1; - SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); - GRANT ALL ON test_table TO test_user_2; + SET ROLE test_user_1; + SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); + GRANT ALL ON test_table TO test_user_2; + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); bool_and --------------------------------------------------------------------- t (1 row) +run_command_on_workers +--------------------------------------------------------------------- +(localhost,57637,t,SET) +(localhost,57638,t,SET) +(2 rows) + step s1-begin: - BEGIN; - SET ROLE test_user_1; + BEGIN; + SET ROLE test_user_1; step s2-begin: - BEGIN; - SET ROLE test_user_2; + BEGIN; + SET ROLE test_user_2; step s1-truncate: - TRUNCATE test_table; + TRUNCATE test_table; step s2-truncate: - TRUNCATE test_table; + TRUNCATE test_table; step s1-commit: - COMMIT; + COMMIT; step s2-truncate: <... completed> step s2-commit: - COMMIT; + COMMIT; diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index d28a6b714..c012ef156 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -103,8 +103,8 @@ step s2-view-worker: query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- -UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500803 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500803 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression (2 rows) step s2-end: diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index a2583034a..6108d3542 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -89,6 +89,7 @@ test: isolation_ref_update_delete_upsert_vs_all_on_mx test: isolation_dis2ref_foreign_keys_on_mx test: isolation_metadata_sync_deadlock test: isolation_replicated_dist_on_mx +test: isolation_multiuser_locking # MXless tests test: isolation_check_mx @@ -96,4 +97,3 @@ test: isolation_replicate_reference_tables_to_coordinator test: isolation_turn_mx_off test: isolation_reference_copy_vs_all test: isolation_ref2ref_foreign_keys -test: isolation_multiuser_locking diff --git a/src/test/regress/spec/isolation_multiuser_locking.spec b/src/test/regress/spec/isolation_multiuser_locking.spec index 3c5193e96..8303e9459 100644 --- 
a/src/test/regress/spec/isolation_multiuser_locking.spec +++ b/src/test/regress/spec/isolation_multiuser_locking.spec @@ -1,5 +1,8 @@ setup { + SELECT citus_internal.replace_isolation_tester_func(); + SELECT citus_internal.refresh_isolation_tester_prepared_statement(); + SET citus.shard_replication_factor TO 1; CREATE USER test_user_1; @@ -16,6 +19,8 @@ setup teardown { + SELECT citus_internal.restore_isolation_tester_func(); + BEGIN; DROP TABLE IF EXISTS test_table; DROP USER test_user_1, test_user_2; @@ -31,6 +36,7 @@ step "s1-grant" SET ROLE test_user_1; SELECT bool_and(success) FROM run_command_on_placements('test_table', 'GRANT ALL ON TABLE %s TO test_user_2'); GRANT ALL ON test_table TO test_user_2; + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off; GRANT ALL ON test_table TO test_user_2;$$); } step "s1-begin" From 72d7d926114dad988bf05360e9b569222188dd1e Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Fri, 4 Feb 2022 10:52:40 +0100 Subject: [PATCH 13/28] Apply code review feedback --- src/backend/distributed/commands/truncate.c | 6 +++++- src/backend/distributed/operations/repair_shards.c | 6 +++++- .../isolation_replicate_reference_tables_to_coordinator.out | 4 ++-- src/test/regress/isolation_schedule | 2 +- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index 109a1d941..815a90f93 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -437,8 +437,12 @@ AcquireDistributedLockOnRelations(List *relationIdList, LOCKMODE lockMode) /* * We only acquire distributed lock on relation if * the relation is sync'ed between mx nodes. + * + * Even if users disable metadata sync, we cannot + * allow them not to acquire the remote locks. + * Hence, we have !IsCoordinator() check. */ - if (ShouldSyncTableMetadata(relationId)) + if (ShouldSyncTableMetadata(relationId) || !IsCoordinator()) { char *qualifiedRelationName = generate_qualified_relation_name(relationId); StringInfo lockRelationCommand = makeStringInfo(); diff --git a/src/backend/distributed/operations/repair_shards.c b/src/backend/distributed/operations/repair_shards.c index 734d97450..1efec3193 100644 --- a/src/backend/distributed/operations/repair_shards.c +++ b/src/backend/distributed/operations/repair_shards.c @@ -556,8 +556,12 @@ BlockWritesToShardList(List *shardList) Oid firstDistributedTableId = firstShardInterval->relationId; bool shouldSyncMetadata = ShouldSyncTableMetadata(firstDistributedTableId); - if (shouldSyncMetadata) + if (shouldSyncMetadata || !IsCoordinator()) { + /* + * Even if users disable metadata sync, we cannot allow them not to + * acquire the remote locks. Hence, we have !IsCoordinator() check. 
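+	 * That is, when this function runs on a worker node, the ExclusiveLock
+	 * on the shard metadata is still taken on the other nodes, even if the
+	 * local citus.enable_metadata_sync setting is off.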
+ */ + LockShardListMetadataOnWorkers(ExclusiveLock, shardList); } } diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index c012ef156..d28a6b714 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -103,8 +103,8 @@ step s2-view-worker: query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- -UPDATE public.ref_table_1500803 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -UPDATE public.ref_table_1500803 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression (2 rows) step s2-end: diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index 6108d3542..f0700c734 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -89,11 +89,11 @@ test: isolation_ref_update_delete_upsert_vs_all_on_mx test: isolation_dis2ref_foreign_keys_on_mx test: isolation_metadata_sync_deadlock test: isolation_replicated_dist_on_mx +test: isolation_replicate_reference_tables_to_coordinator test: isolation_multiuser_locking # MXless tests test: isolation_check_mx -test: isolation_replicate_reference_tables_to_coordinator test: isolation_turn_mx_off test: isolation_reference_copy_vs_all test: isolation_ref2ref_foreign_keys From 79442df1b7d415fe2c984e4fcf3b6371a84231d4 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Fri, 4 Feb 2022 16:37:25 +0300 Subject: [PATCH 14/28] Fix coordinator/worker query targetlists for agg. that we cannot push-down (#5679) Previously, we were wrapping targetlist nodes with Vars that reference the result of the worker query if the node itself was neither a `Const` nor a `Param`. Indeed, we should not do that unless the node itself is a `Var` node or contains a `Var` within it (e.g.: `OpExpr(Var(column_a) > 2)`). Otherwise, when the worker query returns an empty result set, the combine query execution would crash, since the `Var` would point to an empty tuple slot, which is not desirable for the node-executor methods.
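A minimal way to see the failure mode, taken from the regression tests added below: an ordered-set aggregate whose direct argument contains no Vars, run over an empty distributed table, so the worker query returns no tuples.

CREATE TABLE dist_table (dist_col int, agg_col numeric);
SELECT create_distributed_table('dist_table', 'dist_col');
CREATE TABLE ref_table (int_col int);
SELECT create_reference_table('ref_table');

-- the direct argument (2 > random())::int::numeric / 10 is Var-free, so it
-- is now evaluated on the coordinator instead of being wrapped in a Var
-- pointing into the (empty) worker result
SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10)
  WITHIN GROUP (ORDER BY agg_col)
FROM dist_table
LEFT JOIN ref_table ON TRUE;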
--- .../planner/multi_logical_optimizer.c | 22 +++- .../regress/expected/aggregate_support.out | 108 ++++++++++++++++++ src/test/regress/sql/aggregate_support.sql | 51 +++++++++ 3 files changed, 179 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index cb415f49a..cbd9abc43 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -1616,7 +1616,19 @@ MasterAggregateExpression(Aggref *originalAggregate, Expr *directarg; foreach_ptr(directarg, originalAggregate->aggdirectargs) { - if (!IsA(directarg, Const) && !IsA(directarg, Param)) + /* + * Need to replace nodes that contain any Vars with Vars referring + * to the related column of the result set returned for the worker + * aggregation. + * + * When there are no Vars, then the expression can be fully evaluated + * on the coordinator, so we skip it here. This is not just an + * optimization, but the result of the expression might require + * calling the final function of the aggregate, and doing so when + * there are no input rows (i.e.: with an empty tuple slot) is not + * desirable for the node-executor methods. + */ + if (pull_var_clause_default((Node *) directarg) != NIL) { Var *var = makeVar(masterTableId, walkerContext->columnId, exprType((Node *) directarg), @@ -3070,7 +3082,13 @@ WorkerAggregateExpressionList(Aggref *originalAggregate, Expr *directarg; foreach_ptr(directarg, originalAggregate->aggdirectargs) { - if (!IsA(directarg, Const) && !IsA(directarg, Param)) + /* + * The worker aggregation should execute any node that contains any + * Var nodes and return the result in the targetlist, so that the + * combine query can then fetch the result via remote scan; see + * MasterAggregateExpression. + */ + if (pull_var_clause_default((Node *) directarg) != NIL) { workerAggregateList = lappend(workerAggregateList, directarg); } diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index 5a7f3e7ae..89ce53c70 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -712,6 +712,19 @@ select array_agg(val order by valf) from aggdata; {0,NULL,2,3,5,2,4,NULL,NULL,8,NULL} (1 row) +-- test by using some other node types as arguments to agg +select key, percentile_cont((key - (key > 4)::int) / 10.0) within group(order by val) from aggdata group by key; + key | percentile_cont +--------------------------------------------------------------------- + 1 | 2 + 2 | 2.4 + 3 | 4 + 5 | + 6 | + 7 | 8 + 9 | 0 +(7 rows) + -- Test TransformSubqueryNode select * FROM ( SELECT key, mode() within group (order by floor(agg1.val/2)) m from aggdata agg1 @@ -932,5 +945,100 @@ SELECT square_func(5), a, count(a) FROM t1 GROUP BY a; ERROR: function aggregate_support.square_func(integer) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. CONTEXT: while executing command on localhost:xxxxx +-- Test the cases where the worker agg exec. returns no tuples. 
+CREATE TABLE dist_table (dist_col int, agg_col numeric); +SELECT create_distributed_table('dist_table', 'dist_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE ref_table (int_col int); +SELECT create_reference_table('ref_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + percentile_disc +--------------------------------------------------------------------- + +(1 row) + +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM (SELECT *, random() FROM dist_table) a; + percentile_disc +--------------------------------------------------------------------- + +(1 row) + +SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + percentile_disc +--------------------------------------------------------------------- + +(1 row) + +SELECT SUM(COALESCE(agg_col, 3)) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + sum +--------------------------------------------------------------------- + +(1 row) + +SELECT AVG(COALESCE(agg_col, 10)) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + avg +--------------------------------------------------------------------- + +(1 row) + +insert into dist_table values (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25), (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19); +-- run the same queries after loading some data +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + percentile_disc +--------------------------------------------------------------------- + 3.22 +(1 row) + +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM (SELECT *, random() FROM dist_table) a; + percentile_disc +--------------------------------------------------------------------- + 3.22 +(1 row) + +SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + percentile_disc +--------------------------------------------------------------------- + 1.19 +(1 row) + +SELECT floor(SUM(COALESCE(agg_col, 3))) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + floor +--------------------------------------------------------------------- + 1178 +(1 row) + +SELECT floor(AVG(COALESCE(agg_col, 10))) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + floor +--------------------------------------------------------------------- + 109 +(1 row) + set client_min_messages to error; drop schema aggregate_support cascade; diff --git a/src/test/regress/sql/aggregate_support.sql b/src/test/regress/sql/aggregate_support.sql index 7c0614aa8..dc7215f79 100644 --- a/src/test/regress/sql/aggregate_support.sql +++ b/src/test/regress/sql/aggregate_support.sql @@ -376,6 +376,9 @@ select percentile_cont(0.5) within group(order by valf) from aggdata; select key, percentile_cont(key/10.0) within group(order by val) from aggdata group by key; select array_agg(val order by valf) from aggdata; +-- test by using some other node types as arguments to agg +select key, percentile_cont((key - (key > 4)::int) / 10.0) within group(order by val) from aggdata group by key; + -- Test TransformSubqueryNode select * FROM ( @@ -479,6 +482,54 @@ SELECT square_func(5), a FROM t1 GROUP BY a; -- the expression will be pushed down. 
SELECT square_func(5), a, count(a) FROM t1 GROUP BY a; +-- Test the cases where the worker agg exec. returns no tuples. + +CREATE TABLE dist_table (dist_col int, agg_col numeric); +SELECT create_distributed_table('dist_table', 'dist_col'); + +CREATE TABLE ref_table (int_col int); +SELECT create_reference_table('ref_table'); + +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM (SELECT *, random() FROM dist_table) a; + +SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + +SELECT SUM(COALESCE(agg_col, 3)) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + +SELECT AVG(COALESCE(agg_col, 10)) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + +insert into dist_table values (2, 11.2), (3, NULL), (6, 3.22), (3, 4.23), (5, 5.25), (4, 63.4), (75, NULL), (80, NULL), (96, NULL), (8, 1078), (0, 1.19); + +-- run the same queries after loading some data +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + +SELECT PERCENTILE_DISC(.25) WITHIN GROUP (ORDER BY agg_col) +FROM (SELECT *, random() FROM dist_table) a; + +SELECT PERCENTILE_DISC((2 > random())::int::numeric / 10) WITHIN GROUP (ORDER BY agg_col) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + +SELECT floor(SUM(COALESCE(agg_col, 3))) +FROM dist_table +LEFT JOIN ref_table ON TRUE; + +SELECT floor(AVG(COALESCE(agg_col, 10))) +FROM dist_table +LEFT JOIN ref_table ON TRUE; set client_min_messages to error; drop schema aggregate_support cascade; From b5c116449b4c4c88441430209a73ae26b446e1dd Mon Sep 17 00:00:00 2001 From: Ying Xu <32597660+yxu2162@users.noreply.github.com> Date: Fri, 4 Feb 2022 12:45:07 -0800 Subject: [PATCH 15/28] Removed dependency from EnsureTableOwner (#5676) Removed dependency for EnsureTableOwner. Also removed pg_fini() and columnar_tableam_finish() Still need to remove CheckCitusVersion dependency to make Columnar_tableam.h dependency free from Citus. --- src/backend/columnar/columnar_tableam.c | 19 ++++++++++--------- src/backend/columnar/mod.c | 7 ------- src/backend/distributed/shared_library_init.c | 9 --------- src/include/columnar/columnar_tableam.h | 9 ++++----- src/test/regress/expected/multi_multiuser.out | 8 ++++++++ src/test/regress/sql/multi_multiuser.sql | 6 ++++++ 6 files changed, 28 insertions(+), 30 deletions(-) diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index ff6e6cffc..8183feddc 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -1913,13 +1913,6 @@ columnar_tableam_init() } -void -columnar_tableam_finish() -{ - object_access_hook = PrevObjectAccessHook; -} - - /* * Get the number of chunks filtered out during the given scan. 
*/ @@ -2334,7 +2327,11 @@ alter_columnar_table_set(PG_FUNCTION_ARGS) quote_identifier(RelationGetRelationName(rel))))); } - EnsureTableOwner(relationId); + if (!pg_class_ownercheck(relationId, GetUserId())) + { + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE, + get_rel_name(relationId)); + } ColumnarOptions options = { 0 }; if (!ReadColumnarOptions(relationId, &options)) @@ -2454,7 +2451,11 @@ alter_columnar_table_reset(PG_FUNCTION_ARGS) quote_identifier(RelationGetRelationName(rel))))); } - EnsureTableOwner(relationId); + if (!pg_class_ownercheck(relationId, GetUserId())) + { + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_TABLE, + get_rel_name(relationId)); + } ColumnarOptions options = { 0 }; if (!ReadColumnarOptions(relationId, &options)) diff --git a/src/backend/columnar/mod.c b/src/backend/columnar/mod.c index 8908ad618..f2679f326 100644 --- a/src/backend/columnar/mod.c +++ b/src/backend/columnar/mod.c @@ -28,10 +28,3 @@ columnar_init(void) columnar_init_gucs(); columnar_tableam_init(); } - - -void -columnar_fini(void) -{ - columnar_tableam_finish(); -} diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 95250f19d..00e7a523a 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -108,7 +108,6 @@ static GucStringAssignHook OldApplicationNameAssignHook = NULL; void _PG_init(void); -void _PG_fini(void); static void DoInitialCleanup(void); static void ResizeStackToMaximumDepth(void); @@ -358,14 +357,6 @@ _PG_init(void) } -/* shared library deconstruction function */ -void -_PG_fini(void) -{ - columnar_fini(); -} - - /* * DoInitialCleanup does cleanup at start time. * Currently it: diff --git a/src/include/columnar/columnar_tableam.h b/src/include/columnar/columnar_tableam.h index 9b03da3b0..784cf0341 100644 --- a/src/include/columnar/columnar_tableam.h +++ b/src/include/columnar/columnar_tableam.h @@ -7,9 +7,9 @@ #include "access/tableam.h" #include "access/skey.h" #include "nodes/bitmapset.h" - -#include "distributed/coordinator_protocol.h" - +#include "access/heapam.h" +#include "catalog/indexing.h" +#include "utils/acl.h" /* * Number of valid ItemPointer Offset's for "row number" <> "ItemPointer" @@ -50,8 +50,7 @@ typedef struct ColumnarScanDescData *ColumnarScanDesc; const TableAmRoutine * GetColumnarTableAmRoutine(void); extern void columnar_tableam_init(void); -extern void columnar_tableam_finish(void); - +extern bool CheckCitusVersion(int elevel); extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index e15161fc9..01417a974 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -283,6 +283,14 @@ SELECT * FROM columnar_table; 1 (2 rows) +-- Fail to alter a columnar table that is created by a different user +SET ROLE full_access; +SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000); +ERROR: must be owner of table columnar_table +-- Fail to reset a columnar table value created by a different user +SELECT alter_columnar_table_reset('columnar_table', chunk_group_row_limit => true); +ERROR: must be owner of table columnar_table +SET ROLE read_access; -- and drop it DROP TABLE columnar_table; -- cannot modify columnar metadata table as unprivileged user diff --git 
a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql index 204b7360d..aed7fe20b 100644 --- a/src/test/regress/sql/multi_multiuser.sql +++ b/src/test/regress/sql/multi_multiuser.sql @@ -169,6 +169,12 @@ SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000) -- insert some data and read INSERT INTO columnar_table VALUES (1), (1); SELECT * FROM columnar_table; +-- Fail to alter a columnar table that is created by a different user +SET ROLE full_access; +SELECT alter_columnar_table_set('columnar_table', chunk_group_row_limit => 2000); +-- Fail to reset a columnar table value created by a different user +SELECT alter_columnar_table_reset('columnar_table', chunk_group_row_limit => true); +SET ROLE read_access; -- and drop it DROP TABLE columnar_table; From c8e504dd692a37ea563de012ad70c2ea6cf40c78 Mon Sep 17 00:00:00 2001 From: Teja Mupparti Date: Fri, 4 Feb 2022 10:33:00 -0800 Subject: [PATCH 16/28] Fix issue #5673 If the expression is simple, such as SELECT function() or PERFORM function() in PL/PgSQL code, the PL engine does a simple expression evaluation, which can't interpret the Citus CustomScan node. The code checked for simple expressions when executing a UDF but missed the DO-block scenario; this commit fixes it. --- .../planner/function_call_delegation.c | 14 +++--- .../expected/forcedelegation_functions.out | 43 ++++++++++++++++++- .../regress/sql/forcedelegation_functions.sql | 21 +++++++++ 3 files changed, 70 insertions(+), 8 deletions(-) diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index cef7cdf25..716c5357c 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -363,14 +363,16 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext) } /* - * If the expression is simple, such as, SELECT fn() in - * PL/PgSQL code, PL engine is doing simple expression - * evaluation, which can't interpret the CustomScan Node. - * Function from FROM clause is not simple, so it's ok. + * If the expression is simple, such as SELECT function() or PERFORM function() + * in PL/PgSQL code, the PL engine does a simple expression evaluation which can't + * interpret the Citus CustomScan Node. + * Note: a function from the FROM clause is not simple, so it's ok to push down. */ - if (MaybeExecutingUDF() && IsQuerySimple(planContext->query) && !fromFuncExpr) + if ((MaybeExecutingUDF() || DoBlockLevel > 0) && + IsQuerySimple(planContext->query) && + !fromFuncExpr) { - ereport(DEBUG1, (errmsg("Skipping delegation of function " + ereport(DEBUG1, (errmsg("Skipping pushdown of function " "from a PL/PgSQL simple expression"))); return NULL; } diff --git a/src/test/regress/expected/forcedelegation_functions.out b/src/test/regress/expected/forcedelegation_functions.out index 6ab843a42..ad3b6cb8e 100644 --- a/src/test/regress/expected/forcedelegation_functions.out +++ b/src/test/regress/expected/forcedelegation_functions.out @@ -627,7 +627,7 @@ DETAIL: A distributed function is created.
To make sure subsequent commands see (1 row) SELECT outer_emp(); -DEBUG: Skipping delegation of function from a PL/PgSQL simple expression +DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression CONTEXT: SQL statement "SELECT inner_emp('hello')" PL/pgSQL function outer_emp() line XX at PERFORM outer_emp @@ -1388,10 +1388,47 @@ SELECT COUNT(*) FROM table_test_prepare; 28 (1 row) +CREATE TABLE test_perform(i int); +SELECT create_distributed_table('test_perform', 'i', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE FUNCTION test(x int) +RETURNS int +AS $$ +DECLARE +BEGIN + RAISE NOTICE 'INPUT %', x; + RETURN x; +END; +$$ LANGUAGE plpgsql; +SELECT create_distributed_function('test(int)', 'x', + colocate_with := 'test_perform', force_delegation := true); +DEBUG: switching to sequential query execution mode +DETAIL: A distributed function is created. To make sure subsequent commands see the type correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +DO $$ +BEGIN + PERFORM test(3); +END; +$$ LANGUAGE plpgsql; +DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression +CONTEXT: SQL statement "SELECT test(3)" +PL/pgSQL function inline_code_block line XX at PERFORM +NOTICE: INPUT 3 +CONTEXT: PL/pgSQL function test(integer) line XX at RAISE +SQL statement "SELECT test(3)" +PL/pgSQL function inline_code_block line XX at PERFORM RESET client_min_messages; SET citus.log_remote_commands TO off; DROP SCHEMA forcepushdown_schema CASCADE; -NOTICE: drop cascades to 36 other objects +NOTICE: drop cascades to 38 other objects DETAIL: drop cascades to table test_forcepushdown drop cascades to table test_forcepushdown_noncolocate drop cascades to function insert_data(integer) @@ -1428,3 +1465,5 @@ drop cascades to function insert_data_cte_nondist(integer) drop cascades to table table_test_prepare drop cascades to function test_prepare(integer,integer) drop cascades to function outer_test_prepare(integer,integer) +drop cascades to table test_perform +drop cascades to function test(integer) diff --git a/src/test/regress/sql/forcedelegation_functions.sql b/src/test/regress/sql/forcedelegation_functions.sql index a20505ae6..77b171fc1 100644 --- a/src/test/regress/sql/forcedelegation_functions.sql +++ b/src/test/regress/sql/forcedelegation_functions.sql @@ -661,6 +661,27 @@ SELECT outer_test_prepare(1,2); SELECT COUNT(*) FROM table_test_prepare; +CREATE TABLE test_perform(i int); +SELECT create_distributed_table('test_perform', 'i', colocate_with := 'none'); + +CREATE OR REPLACE FUNCTION test(x int) +RETURNS int +AS $$ +DECLARE +BEGIN + RAISE NOTICE 'INPUT %', x; + RETURN x; +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function('test(int)', 'x', + colocate_with := 'test_perform', force_delegation := true); +DO $$ +BEGIN + PERFORM test(3); +END; +$$ LANGUAGE plpgsql; + RESET client_min_messages; SET citus.log_remote_commands TO off; DROP SCHEMA forcepushdown_schema CASCADE; From 0cae8e7d6b905780e4dcf8f59d9538b8d59f13f9 Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Sun, 6 Feb 2022 21:36:34 +0100 Subject: [PATCH 17/28] Remove local-node-first shard placement --- .../distributed/operations/stage_protocol.c | 6 +- .../operations/worker_node_manager.c | 143 ------------------ src/backend/distributed/shared_library_init.c | 1 - 
.../distributed/coordinator_protocol.h | 5 +- src/include/distributed/worker_manager.h | 1 - 5 files changed, 3 insertions(+), 153 deletions(-) diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index c473e7974..2fa052536 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -173,11 +173,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS) { WorkerNode *candidateNode = NULL; - if (ShardPlacementPolicy == SHARD_PLACEMENT_LOCAL_NODE_FIRST) - { - candidateNode = WorkerGetLocalFirstCandidateNode(candidateNodeList); - } - else if (ShardPlacementPolicy == SHARD_PLACEMENT_ROUND_ROBIN) + if (ShardPlacementPolicy == SHARD_PLACEMENT_ROUND_ROBIN) { candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList, shardId, candidateNodeIndex); diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 7fbc53e32..938b90a24 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -40,8 +40,6 @@ int MaxWorkerNodesTracked = 2048; /* determines worker node hash table size * /* Local functions forward declarations */ -static WorkerNode * WorkerGetNodeWithName(const char *hostname); -static char * ClientHostAddress(StringInfo remoteHostStringInfo); static List * PrimaryNodesNotInList(List *currentList); static WorkerNode * FindRandomNodeFromList(List *candidateWorkerNodeList); static bool OddNumber(uint32 number); @@ -152,147 +150,6 @@ WorkerGetRoundRobinCandidateNode(List *workerNodeList, uint64 shardId, } -/* - * WorkerGetLocalFirstCandidateNode takes in a list of worker nodes, and then - * allocates a new worker node. The allocation is performed according to the - * following policy: if the list is empty, the node where the caller is connecting - * from is allocated; if the list is not empty, a node is allocated according - * to random policy. - */ -WorkerNode * -WorkerGetLocalFirstCandidateNode(List *currentNodeList) -{ - WorkerNode *candidateNode = NULL; - uint32 currentNodeCount = list_length(currentNodeList); - - /* choose first candidate node to be the client's host */ - if (currentNodeCount == 0) - { - StringInfo clientHostStringInfo = makeStringInfo(); - char *errorMessage = ClientHostAddress(clientHostStringInfo); - - if (errorMessage != NULL) - { - ereport(ERROR, (errmsg("%s", errorMessage), - errdetail("Could not find the first worker " - "node for local-node-first policy."), - errhint("Make sure that you are not on the " - "master node."))); - } - - /* if hostname is localhost.localdomain, change it to localhost */ - char *clientHost = clientHostStringInfo->data; - if (strncmp(clientHost, "localhost.localdomain", WORKER_LENGTH) == 0) - { - clientHost = pstrdup("localhost"); - } - - candidateNode = WorkerGetNodeWithName(clientHost); - if (candidateNode == NULL) - { - ereport(ERROR, (errmsg("could not find worker node for " - "host: %s", clientHost))); - } - } - else - { - /* find a candidate node different from those already selected */ - candidateNode = WorkerGetRandomCandidateNode(currentNodeList); - } - - return candidateNode; -} - - -/* - * ClientHostAddress appends the connecting client's fully qualified hostname - * to the given StringInfo. If there is no such connection or the connection is - * over Unix domain socket, the function fills the error message and returns it. 
- * On success, it just returns NULL. - */ -static char * -ClientHostAddress(StringInfo clientHostStringInfo) -{ - Port *port = MyProcPort; - char *clientHost = NULL; - char *errorMessage = NULL; - int clientHostLength = NI_MAXHOST; - int flags = NI_NAMEREQD; /* require fully qualified hostname */ - int nameFound = 0; - - if (port == NULL) - { - errorMessage = "cannot find tcp/ip connection to client"; - return errorMessage; - } - - switch (port->raddr.addr.ss_family) - { - case AF_INET: -#ifdef HAVE_IPV6 - case AF_INET6: -#endif - { - break; - } - - default: - { - errorMessage = "invalid address family in connection"; - return errorMessage; - } - } - - clientHost = palloc0(clientHostLength); - - nameFound = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen, - clientHost, clientHostLength, NULL, 0, flags); - if (nameFound == 0) - { - appendStringInfo(clientHostStringInfo, "%s", clientHost); - } - else - { - StringInfo errorMessageStringInfo = makeStringInfo(); - appendStringInfo(errorMessageStringInfo, "could not resolve client host: %s", - gai_strerror(nameFound)); - - errorMessage = errorMessageStringInfo->data; - return errorMessage; - } - - return errorMessage; -} - - -/* - * WorkerGetNodeWithName finds and returns a node from the membership list that - * has the given hostname. The function returns null if no such node exists. - */ -static WorkerNode * -WorkerGetNodeWithName(const char *hostname) -{ - WorkerNode *workerNode = NULL; - HASH_SEQ_STATUS status; - HTAB *workerNodeHash = GetWorkerNodeHash(); - - hash_seq_init(&status, workerNodeHash); - - while ((workerNode = hash_seq_search(&status)) != NULL) - { - int nameCompare = strncmp(workerNode->workerName, hostname, WORKER_LENGTH); - if (nameCompare == 0) - { - /* we need to terminate the scan since we break */ - hash_seq_term(&status); - break; - } - } - - return workerNode; -} - - /* * ActivePrimaryNonCoordinatorNodeCount returns the number of groups with a primary in the cluster. * This method excludes coordinator even if it is added as a worker to cluster. 
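To sketch what remains after this removal (the append-distributed table 'events' is a hypothetical name, not part of the patch), shard placement for master_create_empty_shard() is now driven only by the surviving policies:

-- 'local-node-first' is gone; 'round-robin' and 'random' remain
-- (until the next patch removes the GUC entirely)
SET citus.shard_placement_policy TO 'round-robin';
SELECT master_create_empty_shard('events');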
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 00e7a523a..11898618c 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -169,7 +169,6 @@ static const struct config_enum_entry task_executor_type_options[] = { }; static const struct config_enum_entry shard_placement_policy_options[] = { - { "local-node-first", SHARD_PLACEMENT_LOCAL_NODE_FIRST, false }, { "round-robin", SHARD_PLACEMENT_ROUND_ROBIN, false }, { "random", SHARD_PLACEMENT_RANDOM, false }, { NULL, 0, false } diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h index f0bda8515..c3e149c07 100644 --- a/src/include/distributed/coordinator_protocol.h +++ b/src/include/distributed/coordinator_protocol.h @@ -76,9 +76,8 @@ typedef enum { SHARD_PLACEMENT_INVALID_FIRST = 0, - SHARD_PLACEMENT_LOCAL_NODE_FIRST = 1, - SHARD_PLACEMENT_ROUND_ROBIN = 2, - SHARD_PLACEMENT_RANDOM = 3 + SHARD_PLACEMENT_ROUND_ROBIN = 1, + SHARD_PLACEMENT_RANDOM = 2 } ShardPlacementPolicyType; /* diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 91d91a880..0a6b637b3 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -70,7 +70,6 @@ extern WorkerNode * WorkerGetRandomCandidateNode(List *currentNodeList); extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList, uint64 shardId, uint32 placementIndex); -extern WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList); extern uint32 ActivePrimaryNonCoordinatorNodeCount(void); extern uint32 ActivePrimaryNodeCount(void); extern List * ActivePrimaryNonCoordinatorNodeList(LOCKMODE lockMode); From 872f0a79dbeea0188802cb4b036070ee522d52ee Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Sun, 6 Feb 2022 21:41:20 +0100 Subject: [PATCH 18/28] Remove random shard placement policy --- .../distributed/operations/node_protocol.c | 1 - .../distributed/operations/stage_protocol.c | 19 +-- .../operations/worker_node_manager.c | 149 ------------------ src/backend/distributed/shared_library_init.c | 22 --- .../distributed/coordinator_protocol.h | 9 -- 5 files changed, 3 insertions(+), 197 deletions(-) diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index 0cb0a5ace..d18ef749c 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -67,7 +67,6 @@ /* Shard related configuration */ int ShardCount = 32; int ShardReplicationFactor = 1; /* desired replication factor for shards */ -int ShardPlacementPolicy = SHARD_PLACEMENT_ROUND_ROBIN; int NextShardId = 0; int NextPlacementId = 0; diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index 2fa052536..d6e9c0f2a 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -171,22 +171,9 @@ master_create_empty_shard(PG_FUNCTION_ARGS) /* first retrieve a list of random nodes for shard placements */ while (candidateNodeIndex < attemptableNodeCount) { - WorkerNode *candidateNode = NULL; - - if (ShardPlacementPolicy == SHARD_PLACEMENT_ROUND_ROBIN) - { - candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList, shardId, - candidateNodeIndex); - } - else if (ShardPlacementPolicy == SHARD_PLACEMENT_RANDOM) - { - candidateNode = 
WorkerGetRandomCandidateNode(candidateNodeList); - } - else - { - ereport(ERROR, (errmsg("unrecognized shard placement policy"))); - } - + WorkerNode *candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList, + shardId, + candidateNodeIndex); if (candidateNode == NULL) { ereport(ERROR, (errmsg("could only find %u of %u possible nodes", diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 938b90a24..1054049e4 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -40,10 +40,6 @@ int MaxWorkerNodesTracked = 2048; /* determines worker node hash table size * /* Local functions forward declarations */ -static List * PrimaryNodesNotInList(List *currentList); -static WorkerNode * FindRandomNodeFromList(List *candidateWorkerNodeList); -static bool OddNumber(uint32 number); -static bool ListMember(List *currentList, WorkerNode *workerNode); static bool NodeIsPrimaryWorker(WorkerNode *node); static bool NodeIsReadableWorker(WorkerNode *node); @@ -53,73 +49,6 @@ static bool NodeIsReadableWorker(WorkerNode *node); * ------------------------------------------------------------ */ -/* - * WorkerGetRandomCandidateNode accepts a list of WorkerNode's and returns a random - * primary node which is not in that list. - * - * Note that the function returns null if the worker membership list does not - * contain enough nodes to allocate a new worker node. - */ -WorkerNode * -WorkerGetRandomCandidateNode(List *currentNodeList) -{ - WorkerNode *workerNode = NULL; - bool wantSameRack = false; - uint32 tryCount = WORKER_RACK_TRIES; - - uint32 currentNodeCount = list_length(currentNodeList); - List *candidateWorkerNodeList = PrimaryNodesNotInList(currentNodeList); - - /* we check if the shard has already been placed on all nodes known to us */ - if (list_length(candidateWorkerNodeList) == 0) - { - return NULL; - } - - /* if current node list is empty, randomly pick one node and return */ - if (currentNodeCount == 0) - { - workerNode = FindRandomNodeFromList(candidateWorkerNodeList); - return workerNode; - } - - /* - * If the current list has an odd number of nodes (1, 3, 5, etc), we want to - * place the shard on a different rack than the first node's rack. - * Otherwise, we want to place the shard on the same rack as the first node. - */ - if (OddNumber(currentNodeCount)) - { - wantSameRack = false; - } - else - { - wantSameRack = true; - } - - /* - * We try to find a worker node that fits our rack-aware placement strategy. - * If after a predefined number of tries, we still cannot find such a node, - * we simply give up and return the last worker node we found. - */ - for (uint32 tryIndex = 0; tryIndex < tryCount; tryIndex++) - { - WorkerNode *firstNode = (WorkerNode *) linitial(currentNodeList); - char *firstRack = firstNode->workerRack; - - workerNode = FindRandomNodeFromList(candidateWorkerNodeList); - char *workerRack = workerNode->workerRack; - - bool sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0); - if ((sameRack && wantSameRack) || (!sameRack && !wantSameRack)) - { - break; - } - } - - return workerNode; -} - /* * WorkerGetRoundRobinCandidateNode takes in a list of worker nodes and returns @@ -399,84 +328,6 @@ NodeIsReadableWorker(WorkerNode *node) } -/* - * PrimaryNodesNotInList scans through the worker node hash and returns a list of all - * primary nodes which are not in currentList. 
It runs in O(n*m) but currentList is - * quite small. - */ -static List * -PrimaryNodesNotInList(List *currentList) -{ - List *workerNodeList = NIL; - HTAB *workerNodeHash = GetWorkerNodeHash(); - WorkerNode *workerNode = NULL; - HASH_SEQ_STATUS status; - - hash_seq_init(&status, workerNodeHash); - - while ((workerNode = hash_seq_search(&status)) != NULL) - { - if (ListMember(currentList, workerNode)) - { - continue; - } - - if (NodeIsPrimary(workerNode)) - { - workerNodeList = lappend(workerNodeList, workerNode); - } - } - - return workerNodeList; -} - - -/* FindRandomNodeFromList picks a random node from the list provided to it. */ -static WorkerNode * -FindRandomNodeFromList(List *candidateWorkerNodeList) -{ - uint32 candidateNodeCount = list_length(candidateWorkerNodeList); - - /* nb, the random seed has already been set by the postmaster when starting up */ - uint32 workerPosition = (random() % candidateNodeCount); - - WorkerNode *workerNode = - (WorkerNode *) list_nth(candidateWorkerNodeList, workerPosition); - - return workerNode; -} - - -/* - * OddNumber function returns true if given number is odd; returns false otherwise. - */ -static bool -OddNumber(uint32 number) -{ - bool oddNumber = ((number % 2) == 1); - return oddNumber; -} - - -/* Checks if given worker node is a member of the current list. */ -static bool -ListMember(List *currentList, WorkerNode *workerNode) -{ - Size keySize = WORKER_LENGTH + sizeof(uint32); - - WorkerNode *currentNode = NULL; - foreach_ptr(currentNode, currentList) - { - if (WorkerNodeCompare(workerNode, currentNode, keySize) == 0) - { - return true; - } - } - - return false; -} - - /* * CompareWorkerNodes compares two pointers to worker nodes using the exact * same logic employed by WorkerNodeCompare. diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 11898618c..e109bceed 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -168,12 +168,6 @@ static const struct config_enum_entry task_executor_type_options[] = { { NULL, 0, false } }; -static const struct config_enum_entry shard_placement_policy_options[] = { - { "round-robin", SHARD_PLACEMENT_ROUND_ROBIN, false }, - { "random", SHARD_PLACEMENT_RANDOM, false }, - { NULL, 0, false } -}; - static const struct config_enum_entry use_secondary_nodes_options[] = { { "never", USE_SECONDARY_NODES_NEVER, false }, { "always", USE_SECONDARY_NODES_ALWAYS, false }, @@ -1628,22 +1622,6 @@ RegisterCitusConfigVariables(void) GUC_STANDARD, NULL, NULL, NULL); - DefineCustomEnumVariable( - "citus.shard_placement_policy", - gettext_noop("Sets the policy to use when choosing nodes for shard placement."), - gettext_noop("The master node chooses which worker nodes to place new shards " - "on. This configuration value specifies the policy to use when " - "selecting these nodes. The local-node-first policy places the " - "first replica on the client node and chooses others randomly. " - "The round-robin policy aims to distribute shards evenly across " - "the cluster by selecting nodes in a round-robin fashion." 
- "The random policy picks all workers randomly."), - &ShardPlacementPolicy, - SHARD_PLACEMENT_ROUND_ROBIN, shard_placement_policy_options, - PGC_USERSET, - GUC_STANDARD, - NULL, NULL, NULL); - DefineCustomIntVariable( "citus.shard_replication_factor", gettext_noop("Sets the replication factor for shards."), diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h index c3e149c07..bda318a25 100644 --- a/src/include/distributed/coordinator_protocol.h +++ b/src/include/distributed/coordinator_protocol.h @@ -72,14 +72,6 @@ #define DROP_FOREIGN_TABLE_COMMAND "DROP FOREIGN TABLE IF EXISTS %s CASCADE" #define CREATE_SCHEMA_COMMAND "CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s" -/* Enumeration that defines the shard placement policy to use while staging */ -typedef enum -{ - SHARD_PLACEMENT_INVALID_FIRST = 0, - SHARD_PLACEMENT_ROUND_ROBIN = 1, - SHARD_PLACEMENT_RANDOM = 2 -} ShardPlacementPolicyType; - /* * TableDDLCommandType encodes the implementation used by TableDDLCommand. See comments in * TableDDLCpmmand for details. @@ -211,7 +203,6 @@ extern TableDDLCommand * ColumnarGetCustomTableOptionsDDL(char *schemaName, /* Config variables managed via guc.c */ extern int ShardCount; extern int ShardReplicationFactor; -extern int ShardPlacementPolicy; extern int NextShardId; extern int NextPlacementId; From 8ae7577581256289c6e47f32f829ad577bbac735 Mon Sep 17 00:00:00 2001 From: Burak Velioglu Date: Fri, 4 Feb 2022 15:37:49 +0300 Subject: [PATCH 19/28] Use superuser connection while syncing dependent objects' pg_dist_object tuples --- .../distributed/commands/dependencies.c | 10 +- src/backend/distributed/metadata/distobject.c | 94 +++++++++++++++---- .../transaction/worker_transaction.c | 21 ++++- src/include/distributed/metadata/distobject.h | 1 + src/include/distributed/worker_transaction.h | 1 + 5 files changed, 104 insertions(+), 23 deletions(-) diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index ea1c59064..f82ddf065 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -120,7 +120,15 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) */ foreach_ptr(dependency, dependenciesWithCommands) { - MarkObjectDistributed(dependency); + /* + * pg_dist_object entries must be propagated with the super user, since + * the owner of the target object may not own dependencies but we must + * propagate as we send objects itself with the superuser. + * + * Only dependent object's metadata should be propagated with super user. + * Metadata of the table itself must be propagated with the current user. + */ + MarkObjectDistributedViaSuperUser(dependency); } } diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 37aaa3aed..ba67a073b 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -46,6 +46,8 @@ #include "utils/rel.h" +static void MarkObjectDistributedLocally(const ObjectAddress *distAddress); +static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress); static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, Datum *paramValues); @@ -141,14 +143,60 @@ ObjectExists(const ObjectAddress *address) /* - * MarkObjectDistributed marks an object as a distributed object by citus. Marking is done - * by adding appropriate entries to citus.pg_dist_object. 
+ * MarkObjectDistributed marks an object as a distributed object. Marking is done + * by adding appropriate entries to citus.pg_dist_object and also marking the object + * as distributed by opening a connection as the current user to all of the workers + * with metadata if object propagation is on. * - * This also marks the object as distributed on all of the workers with metadata - * if object propagation is on. + * This function should be used if the current user is creating the given object. + * To mark dependent objects as distributed, see MarkObjectDistributedViaSuperUser. */ void MarkObjectDistributed(const ObjectAddress *distAddress) +{ + MarkObjectDistributedLocally(distAddress); + + if (EnableMetadataSync) + { + char *workerPgDistObjectUpdateCommand = + CreatePgDistObjectEntryCommand(distAddress); + SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand); + } +} + + +/* + * MarkObjectDistributedViaSuperUser marks an object as a distributed object. Marking + * is done by adding appropriate entries to citus.pg_dist_object and also marking the + * object as distributed by opening a connection as the superuser to all of the workers + * with metadata if object propagation is on. + * + * This function should be used to mark dependent objects as distributed. To mark + * the object that is itself being created, see MarkObjectDistributed. + */ +void +MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress) +{ + MarkObjectDistributedLocally(distAddress); + + if (EnableMetadataSync) + { + char *workerPgDistObjectUpdateCommand = + CreatePgDistObjectEntryCommand(distAddress); + SendCommandToWorkersWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand); + } +} + + +/* + * MarkObjectDistributedLocally marks an object as a distributed object by citus. + * Marking is done by adding appropriate entries to citus.pg_dist_object. + * + * This function should never be called on its own; call MarkObjectDistributed() + * or MarkObjectDistributedViaSuperUser() instead. + */ +static void +MarkObjectDistributedLocally(const ObjectAddress *distAddress) { int paramCount = 3; Oid paramTypes[3] = { @@ -161,32 +209,38 @@ MarkObjectDistributed(const ObjectAddress *distAddress) ObjectIdGetDatum(distAddress->objectId), Int32GetDatum(distAddress->objectSubId) }; - char *insertQuery = "INSERT INTO citus.pg_dist_object (classid, objid, objsubid) " "VALUES ($1, $2, $3) ON CONFLICT DO NOTHING"; - int spiStatus = ExecuteCommandAsSuperuser(insertQuery, paramCount, paramTypes, paramValues); if (spiStatus < 0) { ereport(ERROR, (errmsg("failed to insert object into citus.pg_dist_object"))); } +} - if (EnableMetadataSync) - { - /* create a list by adding the address of value to not to have warning */ - List *objectAddressList = list_make1((ObjectAddress *) distAddress); - List *distArgumetIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX); - List *colocationIdList = list_make1_int(INVALID_COLOCATION_ID); - List *forceDelegationList = list_make1_int(NO_FORCE_PUSHDOWN); - char *workerPgDistObjectUpdateCommand = - MarkObjectsDistributedCreateCommand(objectAddressList, - distArgumetIndexList, - colocationIdList, - forceDelegationList); - SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand); - } +/* + * CreatePgDistObjectEntryCommand creates the command to insert a pg_dist_object tuple + * for the given object address.
+ */ +static char * +CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress) +{ + /* create a list by adding the address of value to avoid a warning */ + List *objectAddressList = + list_make1((ObjectAddress *) objectAddress); + List *distArgumetIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX); + List *colocationIdList = list_make1_int(INVALID_COLOCATION_ID); + List *forceDelegationList = list_make1_int(NO_FORCE_PUSHDOWN); + + char *workerPgDistObjectUpdateCommand = + MarkObjectsDistributedCreateCommand(objectAddressList, + distArgumetIndexList, + colocationIdList, + forceDelegationList); + + return workerPgDistObjectUpdateCommand; } diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 61baff4fe..e94abed53 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -112,8 +112,7 @@ SendCommandToWorkerAsUser(const char *nodeName, int32 nodePort, const char *node /* * SendCommandToWorkers sends a command to all workers in * parallel. Commands are committed on the workers when the local - * transaction commits. The connection are made as the extension - * owner to ensure write access to the Citus metadata tables. + * transaction commits. */ void SendCommandToWorkersWithMetadata(const char *command) @@ -123,6 +122,24 @@ SendCommandToWorkersWithMetadata(const char *command) } +/* + * SendCommandToWorkersWithMetadataViaSuperUser sends a command to all workers in + * parallel by opening a superuser connection. Commands are committed on the workers + * when the local transaction commits. The connections are made as the extension + * owner to ensure write access to the Citus metadata tables. + * + * Since we otherwise prevent opening superuser connections for metadata tables, + * using this function is discouraged. Consider using it only for propagating + * pg_dist_object tuples for dependent objects. + */ +void +SendCommandToWorkersWithMetadataViaSuperUser(const char *command) +{ + SendCommandToMetadataWorkersParams(command, CitusExtensionOwnerName(), + 0, NULL, NULL); +} + + /* * TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the * TargetWorkerSet.
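To illustrate the intended split, a hedged sketch (the role app_user and the function app_func are hypothetical names, not part of the patch):

-- app_func's own pg_dist_object entry is synced to metadata workers as the
-- current user, while entries for dependencies the user may not own (e.g. the
-- plpgsql extension) are synced over a superuser connection via
-- SendCommandToWorkersWithMetadataViaSuperUser()
SET ROLE app_user;
SELECT create_distributed_function('app_func(int)');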
diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 659e8ab7f..472cd83e2 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -23,6 +23,7 @@ extern bool CitusExtensionObject(const ObjectAddress *objectAddress); extern bool IsObjectDistributed(const ObjectAddress *address); extern bool ClusterHasDistributedFunctionWithDistArgument(void); extern void MarkObjectDistributed(const ObjectAddress *distAddress); +extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress); extern void UnmarkObjectDistributed(const ObjectAddress *address); extern bool IsTableOwnedByExtension(Oid relationId); extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index 63d419c66..c3748ee5b 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -49,6 +49,7 @@ extern bool SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(cons List * commandList); extern void SendCommandToWorkersWithMetadata(const char *command); +extern void SendCommandToWorkersWithMetadataViaSuperUser(const char *command); extern void SendBareCommandListToMetadataWorkers(List *commandList); extern void EnsureNoModificationsHaveBeenDone(void); extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName, From ab248c17856b718e2eec850d39e5e8c2c5bde696 Mon Sep 17 00:00:00 2001 From: Burak Velioglu Date: Fri, 4 Feb 2022 16:02:55 +0300 Subject: [PATCH 20/28] Check object ownership while creating pg_dist_object entries on remote --- .../metadata/pg_get_object_address_12_13_14.c | 189 +----------------- 1 file changed, 8 insertions(+), 181 deletions(-) diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index c2a8e29e3..c4da6764a 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -38,7 +38,6 @@ static void ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, Node *node, Relation *relation); -static void ErrorIfUserNotAllowedToPropagateExtension(char *extensionName); static List * textarray_to_strvaluelist(ArrayType *arr); /* It is defined on PG >= 13 versions by default */ @@ -398,9 +397,6 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, Node *node, Relation *relation) { Oid userId = GetUserId(); - AclMode aclMaskResult = 0; - bool skipAclCheck = false; - Oid idToCheck = InvalidOid; if (!SupportedDependencyByCitus(addr)) { @@ -410,27 +406,19 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, switch (type) { case OBJECT_SCHEMA: - { - idToCheck = addr->objectId; - aclMaskResult = pg_namespace_aclmask(idToCheck, userId, ACL_USAGE, - ACLMASK_ANY); - break; - } - + case OBJECT_DATABASE: case OBJECT_FUNCTION: case OBJECT_PROCEDURE: case OBJECT_AGGREGATE: + case OBJECT_TYPE: + case OBJECT_FOREIGN_SERVER: + case OBJECT_SEQUENCE: + case OBJECT_FOREIGN_TABLE: + case OBJECT_TABLE: + case OBJECT_EXTENSION: + case OBJECT_COLLATION: { check_object_ownership(userId, type, *addr, node, *relation); - skipAclCheck = true; - break; - } - - case OBJECT_DATABASE: - { - idToCheck = addr->objectId; - aclMaskResult = pg_database_aclmask(idToCheck, userId, 
ACL_CONNECT, - ACLMASK_ANY); break; } @@ -443,54 +431,6 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, "access privileges on role %d with type %d", addr->objectId, type))); } - skipAclCheck = true; - break; - } - - case OBJECT_TYPE: - { - idToCheck = addr->objectId; - aclMaskResult = pg_type_aclmask(idToCheck, userId, ACL_USAGE, - ACLMASK_ANY); - break; - } - - case OBJECT_FOREIGN_SERVER: - { - idToCheck = addr->objectId; - aclMaskResult = pg_foreign_server_aclmask(idToCheck, userId, ACL_USAGE, - ACLMASK_ANY); - break; - } - - case OBJECT_SEQUENCE: - { - idToCheck = addr->objectId; - aclMaskResult = pg_class_aclmask(idToCheck, userId, ACL_USAGE, ACLMASK_ANY); - break; - } - - case OBJECT_FOREIGN_TABLE: - case OBJECT_TABLE: - { - /* table distribution already does the ownership check, so we can stick to that over acl_check */ - check_object_ownership(userId, type, *addr, node, *relation); - skipAclCheck = true; - break; - } - - case OBJECT_EXTENSION: - { - Value *valueNode = (Value *) node; - char *extensionName = strVal(valueNode); - ErrorIfUserNotAllowedToPropagateExtension(extensionName); - skipAclCheck = true; - break; - } - - case OBJECT_COLLATION: - { - skipAclCheck = true; break; } @@ -501,119 +441,6 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, break; } } - - if (!skipAclCheck && aclMaskResult == ACL_NO_RIGHTS) - { - ereport(ERROR, (errmsg("Current user does not have required privileges " - "on %d with type id %d to distribute it", - idToCheck, type))); - } -} - - -/* - * ErrorIfUserNotAllowedToPropagateExtension errors out if the current user does - * not have required privileges to propagate extension - */ -static void -ErrorIfUserNotAllowedToPropagateExtension(char *extensionName) -{ - const int nameAttributeIndex = 1; - const int superuserAttributeIndex = 4; -#if PG_VERSION_NUM >= PG_VERSION_13 - const int trustedAttributeIndex = 5; -#endif - - LOCAL_FCINFO(fcinfo, 0); - FmgrInfo flinfo; - - bool goForward = true; - bool doCopy = false; - - EState *estate = CreateExecutorState(); - ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo); - extensionsResultSet->econtext = GetPerTupleExprContext(estate); - extensionsResultSet->allowedModes = SFRM_Materialize; - - fmgr_info(F_PG_AVAILABLE_EXTENSION_VERSIONS, &flinfo); - InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL, - (Node *) extensionsResultSet); - - /* - * pg_available_extensions_versions returns result set containing all - * available extension versions with whether the extension requires - * superuser and it is trusted information. 
- */ - (*pg_available_extension_versions)(fcinfo); - - TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( - extensionsResultSet->setDesc, - &TTSOpsMinimalTuple); - bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, - goForward, - doCopy, - tupleTableSlot); - while (hasTuple) - { - bool isNull = false; - Datum curExtensionNameDatum = slot_getattr(tupleTableSlot, - nameAttributeIndex, - &isNull); - char *curExtensionName = NameStr(*DatumGetName(curExtensionNameDatum)); - if (strcmp(extensionName, curExtensionName) == 0) - { - Datum superuserExpectedDatum = slot_getattr(tupleTableSlot, - superuserAttributeIndex, - &isNull); - bool superuserExpected = DatumGetBool(superuserExpectedDatum); - -#if PG_VERSION_NUM < PG_VERSION_13 - if (superuserExpected) - { - EnsureSuperUser(); - } -#else - if (superuserExpected) - { - /* - * After PG 13, if the extension is trusted it can be created - * by the user having CREATE privilege on the database even if - * the extension requires superuser. - */ - Datum trustedExtensionDatum = slot_getattr(tupleTableSlot, - trustedAttributeIndex, - &isNull); - bool trustedExtension = DatumGetBool(trustedExtensionDatum); - - if (trustedExtension) - { - /* Allow if user has CREATE privilege on current database */ - AclResult aclresult = pg_database_aclcheck(MyDatabaseId, - GetUserId(), - ACL_CREATE); - if (aclresult != ACLCHECK_OK) - { - ereport(ERROR, (errmsg("operation is not allowed"), - errhint("Must have CREATE privilege " - "on database to propagate " - "extension %s", curExtensionName))); - } - } - else - { - EnsureSuperUser(); - } - } -#endif - break; - } - - ExecClearTuple(tupleTableSlot); - hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, - doCopy, tupleTableSlot); - } - - ExecDropSingleTupleTableSlot(tupleTableSlot); } From c0aece64d06425391008eda4e8ad9fd520bf58cb Mon Sep 17 00:00:00 2001 From: Burak Velioglu Date: Fri, 4 Feb 2022 16:14:54 +0300 Subject: [PATCH 21/28] Add test for checking distributed extension function --- .../non_super_user_object_metadata.out | 47 ++++++++++++++++++- .../sql/non_super_user_object_metadata.sql | 18 +++++++ 2 files changed, 63 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/non_super_user_object_metadata.out b/src/test/regress/expected/non_super_user_object_metadata.out index ebbf9510e..0ff03c0b8 100644 --- a/src/test/regress/expected/non_super_user_object_metadata.out +++ b/src/test/regress/expected/non_super_user_object_metadata.out @@ -79,6 +79,21 @@ SELECT create_distributed_function('test_function(int)'); (1 row) +-- Create and distribute plpgsql extension's function +CREATE OR REPLACE FUNCTION plpgsql_dist_function(text) +RETURNS void +LANGUAGE plpgsql AS +$$ + BEGIN + RAISE NOTICE '%', $1; + END; +$$; +SELECT create_distributed_function('plpgsql_dist_function(text)'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + -- show that schema, types, function and sequence has marked as distributed -- on the coordinator node RESET ROLE; @@ -124,6 +139,12 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dis (function,"{local_schema,test_function}",{integer}) (1 row) +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + 
(function,"{local_schema,plpgsql_dist_function}",{pg_catalog.text}) +(1 row) + -- show those objects marked as distributed on metadata worker node as well SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;$$) ORDER BY 1,2; nodename | nodeport | success | result @@ -174,6 +195,27 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas localhost | 57638 | t | (function,"{local_schema,test_function}",{integer}) (2 rows) +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{local_schema,plpgsql_dist_function}",{pg_catalog.text}) + localhost | 57638 | t | (function,"{local_schema,plpgsql_dist_function}",{pg_catalog.text}) +(2 rows) + +-- Show that extension plpgsql is also marked as distributed as a dependency of plpgsl_dist_function +SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%'; + obj_identifier +--------------------------------------------------------------------- + (extension,{plpgsql},{}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%';$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (extension,{plpgsql},{}) + localhost | 57638 | t | (extension,{plpgsql},{}) +(2 rows) + -- show that schema is owned by the superuser SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema'; rolname @@ -372,8 +414,9 @@ SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM -- Show that dropping schema doesn't affect the worker node DROP SCHEMA local_schema CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to table metadata_dist_test_table +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to function plpgsql_dist_function(text) +drop cascades to table metadata_dist_test_table drop cascades to function metadata_dist_test_proc(integer,integer) SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%'; obj_identifier diff --git a/src/test/regress/sql/non_super_user_object_metadata.sql b/src/test/regress/sql/non_super_user_object_metadata.sql index 67e9687a3..be965288f 100644 --- a/src/test/regress/sql/non_super_user_object_metadata.sql +++ b/src/test/regress/sql/non_super_user_object_metadata.sql @@ -55,6 +55,18 @@ SET search_path TO local_schema; SELECT create_distributed_table('dist_table', 'a'); SELECT create_distributed_function('test_function(int)'); +-- Create and distribute plpgsql extension's function +CREATE OR REPLACE FUNCTION plpgsql_dist_function(text) +RETURNS void +LANGUAGE plpgsql AS +$$ + BEGIN + RAISE NOTICE '%', $1; + END; +$$; + +SELECT 
create_distributed_function('plpgsql_dist_function(text)');
+
 -- show that schema, types, function and sequence has marked as distributed
 -- on the coordinator node
 RESET ROLE;
@@ -65,6 +77,7 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dis
 SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;
 SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;
 SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;
+SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid;
 
 -- show those objects marked as distributed on metadata worker node as well
 SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema'::regnamespace::oid;$$) ORDER BY 1,2;
@@ -74,6 +87,11 @@ SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(clas
 SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'test_sequence_schema.test_sequence'::regclass::oid;$$) ORDER BY 1,2;
 SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.dist_table_e_seq'::regclass::oid;$$) ORDER BY 1,2;
 SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.test_function'::regproc::oid;$$) ORDER BY 1,2;
+SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'local_schema.plpgsql_dist_function'::regproc::oid;$$) ORDER BY 1,2;
+
+-- Show that extension plpgsql is also marked as distributed as a dependency of plpgsql_dist_function
+SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%';
+SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{plpgsql}%';$$) ORDER BY 1,2;
 
 -- show that schema is owned by the superuser
 SELECT rolname FROM pg_roles JOIN pg_namespace ON(pg_namespace.nspowner = pg_roles.oid) WHERE nspname = 'local_schema';

From 0a70b78bf59998e3fbcf4db8622fd2f5dd21914c Mon Sep 17 00:00:00 2001
From: Burak Velioglu
Date: Fri, 4 Feb 2022 16:29:12 +0300
Subject: [PATCH 22/28] Add test for dist type

---
 .../regress/expected/metadata_sync_helpers.out | 16 ++++++++++++++++
 src/test/regress/sql/metadata_sync_helpers.sql | 13 +++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out
index 15de77e4d..cb3b113e2 100644
--- a/src/test/regress/expected/metadata_sync_helpers.out
+++ b/src/test/regress/expected/metadata_sync_helpers.out
@@ -664,6 +664,22 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 	SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, 
distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: must be owner of function distribution_test_function ROLLBACK; +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to 'citus_internal'; + \set VERBOSITY terse + CREATE TYPE distributed_test_type AS (a int, b int); + SET ROLE metadata_sync_helper_role; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) + AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false)) + SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; +ERROR: must be owner of type distributed_test_type +ROLLBACK; -- we do not allow wrong partmethod -- so manually insert wrong partmethod for the sake of the test SET search_path TO metadata_sync_helpers; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 7054c5414..22e337443 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -425,6 +425,19 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to 'citus_internal'; + \set VERBOSITY terse + + CREATE TYPE distributed_test_type AS (a int, b int); + + SET ROLE metadata_sync_helper_role; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) + AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false)) + SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; +ROLLBACK; + -- we do not allow wrong partmethod -- so manually insert wrong partmethod for the sake of the test SET search_path TO metadata_sync_helpers; From 8ee02b29d0b02c885fb0716f932c1c1f4f7e9640 Mon Sep 17 00:00:00 2001 From: Halil Ozan Akgul Date: Thu, 6 Jan 2022 12:31:00 +0300 Subject: [PATCH 23/28] Introduce global PID --- .../connection/connection_configuration.c | 7 +- .../connection/connection_management.c | 3 +- .../distributed/metadata/metadata_cache.c | 61 ++++++ src/backend/distributed/shared_library_init.c | 1 + .../distributed/sql/citus--10.2-4--11.0-1.sql | 19 ++ .../sql/downgrades/citus--11.0-1--10.2-4.sql | 125 ++++++++++++ .../udfs/citus_dist_stat_activity/11.0-1.sql | 19 ++ .../udfs/citus_dist_stat_activity/latest.sql | 19 ++ .../sql/udfs/citus_lock_waits/11.0-1.sql | 44 ++++ .../sql/udfs/citus_lock_waits/latest.sql | 44 ++++ .../citus_worker_stat_activity/11.0-1.sql | 19 ++ .../citus_worker_stat_activity/latest.sql | 19 ++ .../get_all_active_transactions/11.0-1.sql | 12 ++ .../get_all_active_transactions/latest.sql | 12 ++ .../get_global_active_transactions/11.0-1.sql | 9 + .../get_global_active_transactions/latest.sql | 9 + .../distributed/transaction/backend_data.c | 134 ++++++++++++- .../transaction/citus_dist_stat_activity.c | 19 +- 
src/include/distributed/backend_data.h | 6 + .../distributed/connection_management.h | 2 +- src/include/distributed/metadata_cache.h | 1 + .../failure_connection_establishment.out | 4 +- .../isolation_distributed_transaction_id.out | 3 +- .../isolation_get_all_active_transactions.out | 16 +- .../regress/expected/isolation_global_pid.out | 145 ++++++++++++++ ...licate_reference_tables_to_coordinator.out | 4 +- .../expected/metadata_sync_helpers.out | 188 +++++++++++------- src/test/regress/isolation_schedule | 1 + .../isolation_distributed_transaction_id.spec | 3 +- ...isolation_get_all_active_transactions.spec | 16 +- .../regress/spec/isolation_global_pid.spec | 96 +++++++++ ...icate_reference_tables_to_coordinator.spec | 4 +- .../sql/failure_connection_establishment.sql | 2 +- .../regress/sql/metadata_sync_helpers.sql | 168 +++++++++------- 34 files changed, 1057 insertions(+), 177 deletions(-) create mode 100644 src/backend/distributed/sql/udfs/citus_dist_stat_activity/11.0-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_dist_stat_activity/latest.sql create mode 100644 src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql create mode 100644 src/backend/distributed/sql/udfs/citus_worker_stat_activity/11.0-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_worker_stat_activity/latest.sql create mode 100644 src/backend/distributed/sql/udfs/get_all_active_transactions/11.0-1.sql create mode 100644 src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql create mode 100644 src/backend/distributed/sql/udfs/get_global_active_transactions/11.0-1.sql create mode 100644 src/backend/distributed/sql/udfs/get_global_active_transactions/latest.sql create mode 100644 src/test/regress/expected/isolation_global_pid.out create mode 100644 src/test/regress/spec/isolation_global_pid.spec diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index 32dc21e40..41017fab9 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -10,6 +10,7 @@ #include "postgres.h" +#include "distributed/backend_data.h" #include "distributed/citus_safe_lib.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" @@ -232,6 +233,10 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, */ char nodePortString[12] = ""; + StringInfo applicationName = makeStringInfo(); + appendStringInfo(applicationName, "%s%ld", CITUS_APPLICATION_NAME_PREFIX, + GetGlobalPID()); + /* * This function has three sections: * - Initialize the keywords and values (to be copied later) of global parameters @@ -260,7 +265,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, key->database, key->user, GetDatabaseEncodingName(), - CITUS_APPLICATION_NAME + applicationName->data }; /* diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index 89a863109..13b52790a 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -19,6 +19,7 @@ #include "access/hash.h" #include "commands/dbcommands.h" +#include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/errormessage.h" #include 
"distributed/error_codes.h" @@ -1459,7 +1460,7 @@ IsRebalancerInternalBackend(void) bool IsCitusInternalBackend(void) { - return application_name && strcmp(application_name, CITUS_APPLICATION_NAME) == 0; + return ExtractGlobalPID(application_name) != INVALID_CITUS_INTERNAL_BACKEND_GPID; } diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index ac6db8c61..161a56942 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -201,6 +201,9 @@ static bool workerNodeHashValid = false; /* default value is -1, for coordinator it's 0 and for worker nodes > 0 */ static int32 LocalGroupId = -1; +/* default value is -1, increases with every node starting from 1 */ +static int32 LocalNodeId = -1; + /* built first time through in InitializeDistCache */ static ScanKeyData DistPartitionScanKey[1]; static ScanKeyData DistShardScanKey[1]; @@ -3618,6 +3621,62 @@ GetLocalGroupId(void) } +/* + * GetNodeId returns the node identifier of the local node. + */ +int32 +GetLocalNodeId(void) +{ + InitializeCaches(); + + /* + * Already set the node id, no need to read the heap again. + */ + if (LocalNodeId != -1) + { + return LocalNodeId; + } + + uint32 nodeId = -1; + + int32 localGroupId = GetLocalGroupId(); + + bool includeNodesFromOtherClusters = false; + List *workerNodeList = ReadDistNode(includeNodesFromOtherClusters); + + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, workerNodeList) + { + if (workerNode->groupId == localGroupId && + workerNode->isActive) + { + nodeId = workerNode->nodeId; + break; + } + } + + /* + * nodeId is -1 if we cannot find an active node whose group id is + * localGroupId in pg_dist_node. + */ + if (nodeId == -1) + { + elog(DEBUG4, "there is no active node with group id '%d' on pg_dist_node", + localGroupId); + + /* + * This is expected if the coordinator is not added to the metadata. + * We'll return 0 for this case and for all cases so views can function almost normally + */ + nodeId = 0; + } + + LocalNodeId = nodeId; + + return nodeId; +} + + /* * RegisterLocalGroupIdCacheCallbacks registers the callbacks required to * maintain LocalGroupId at a consistent value. 
@@ -4019,6 +4078,7 @@ InvalidateMetadataSystemCache(void)
 	memset(&MetadataCache, 0, sizeof(MetadataCache));
 	workerNodeHashValid = false;
 	LocalGroupId = -1;
+	LocalNodeId = -1;
 }
 
 
@@ -4110,6 +4170,7 @@ InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId)
 	if (relationId == InvalidOid || relationId == MetadataCache.distNodeRelationId)
 	{
 		workerNodeHashValid = false;
+		LocalNodeId = -1;
 	}
 }
 
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index e109bceed..70d3f549c 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -458,6 +458,7 @@ StartupCitusBackend(void)
 	InitializeMaintenanceDaemonBackend();
 	InitializeBackendData();
 	RegisterConnectionCleanup();
+	AssignGlobalPID();
 }
 
 
diff --git a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
index 9c23b31c2..c3ffbb1cb 100644
--- a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
+++ b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql
@@ -14,6 +14,25 @@
 #include "udfs/worker_drop_sequence_dependency/11.0-1.sql"
 #include "udfs/worker_drop_shell_table/11.0-1.sql"
 
+#include "udfs/get_all_active_transactions/11.0-1.sql"
+#include "udfs/get_global_active_transactions/11.0-1.sql"
+
+#include "udfs/citus_worker_stat_activity/11.0-1.sql"
+
+CREATE VIEW citus.citus_worker_stat_activity AS
+SELECT * FROM pg_catalog.citus_worker_stat_activity();
+ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC;
+
+#include "udfs/citus_dist_stat_activity/11.0-1.sql"
+
+CREATE VIEW citus.citus_dist_stat_activity AS
+SELECT * FROM pg_catalog.citus_dist_stat_activity();
+ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC;
+
+-- we have to recreate this view because we recreated citus_dist_stat_activity, which this view depends on
+#include "udfs/citus_lock_waits/11.0-1.sql"
 
 DROP FUNCTION IF EXISTS pg_catalog.master_apply_delete_command(text);
 DROP FUNCTION pg_catalog.master_get_table_metadata(text);
diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql
index 6fbe07ea3..e94ed0bbf 100644
--- a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql
+++ b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql
@@ -84,3 +84,128 @@ DROP FUNCTION pg_catalog.citus_shards_on_worker();
 DROP FUNCTION pg_catalog.citus_shard_indexes_on_worker();
 #include "../udfs/create_distributed_function/9.0-1.sql"
 ALTER TABLE citus.pg_dist_object DROP COLUMN force_delegation;
+
+
+SET search_path = 'pg_catalog';
+
+
+DROP FUNCTION IF EXISTS get_all_active_transactions();
+
+
+CREATE OR REPLACE FUNCTION get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
+                                                        OUT transaction_number int8, OUT transaction_stamp timestamptz)
+RETURNS SETOF RECORD
+LANGUAGE C STRICT AS 'MODULE_PATHNAME',
+$$get_all_active_transactions$$;
+
+COMMENT ON FUNCTION get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL,
+                                                OUT transaction_number int8, OUT transaction_stamp timestamptz)
+IS 'returns distributed transaction ids of active distributed transactions';
+
+DROP FUNCTION IF EXISTS get_global_active_transactions();
+
+CREATE FUNCTION get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz)
+	RETURNS SETOF RECORD
+	LANGUAGE C STRICT
+	AS 'MODULE_PATHNAME', $$get_global_active_transactions$$;
+	COMMENT ON FUNCTION get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz)
+	IS 'returns distributed transaction ids of active distributed transactions from each node of the cluster';
+
+RESET search_path;
+
+DROP FUNCTION pg_catalog.citus_dist_stat_activity CASCADE;
+
+CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
+                                                               OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
+                                                               OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
+                                                               OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
+                                                               OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
+                                                               OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
+RETURNS SETOF RECORD
+LANGUAGE C STRICT AS 'MODULE_PATHNAME',
+$$citus_dist_stat_activity$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
+                                                        OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
+                                                        OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
+                                                        OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
+                                                        OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
+                                                        OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text)
+IS 'returns distributed transaction activity on distributed tables';
+
+CREATE VIEW citus.citus_dist_stat_activity AS
+SELECT * FROM pg_catalog.citus_dist_stat_activity();
+ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC;
+
+SET search_path = 'pg_catalog';
+
+-- we have to recreate this view because we dropped citus_dist_stat_activity, which this view depends on
+CREATE VIEW citus.citus_lock_waits AS
+
+WITH
+citus_dist_stat_activity AS
+(
+  SELECT * FROM citus_dist_stat_activity
+),
+unique_global_wait_edges AS
+(
+	SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges()
+),
+citus_dist_stat_activity_with_node_id AS
+(
+  SELECT
+  citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id
+  FROM
+  citus_dist_stat_activity LEFT JOIN pg_dist_node
+  ON
+  citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND
+  citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport
+)
+SELECT
+ waiting.pid AS waiting_pid,
+ blocking.pid AS blocking_pid,
+ waiting.query AS blocked_statement,
+ blocking.query AS 
current_statement_in_blocking_process, + waiting.initiator_node_id AS waiting_node_id, + blocking.initiator_node_id AS blocking_node_id, + waiting.distributed_query_host_name AS waiting_node_name, + blocking.distributed_query_host_name AS blocking_node_name, + waiting.distributed_query_host_port AS waiting_node_port, + blocking.distributed_query_host_port AS blocking_node_port +FROM + unique_global_wait_edges +JOIN + citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) +JOIN + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +DROP FUNCTION citus_worker_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_worker_stat_activity$$; + +COMMENT ON FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +IS 'returns distributed transaction activity on shards of distributed tables'; + +CREATE VIEW citus.citus_worker_stat_activity AS +SELECT * FROM pg_catalog.citus_worker_stat_activity(); +ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_dist_stat_activity/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/11.0-1.sql new file mode 100644 index 000000000..7b38f627d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/11.0-1.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_dist_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT 
application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_dist_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +IS 'returns distributed transaction activity on distributed tables'; diff --git a/src/backend/distributed/sql/udfs/citus_dist_stat_activity/latest.sql b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/latest.sql new file mode 100644 index 000000000..7b38f627d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_dist_stat_activity/latest.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_dist_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_dist_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +IS 'returns distributed transaction activity on distributed tables'; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql new file mode 100644 index 000000000..2ae40374a --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql @@ -0,0 +1,44 @@ +SET search_path = 
'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +citus_dist_stat_activity AS +( + SELECT * FROM citus_dist_stat_activity +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() +), +citus_dist_stat_activity_with_node_id AS +( + SELECT + citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id + FROM + citus_dist_stat_activity LEFT JOIN pg_dist_node + ON + citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND + citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport +) +SELECT + waiting.pid AS waiting_pid, + blocking.pid AS blocking_pid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.initiator_node_id AS waiting_node_id, + blocking.initiator_node_id AS blocking_node_id, + waiting.distributed_query_host_name AS waiting_node_name, + blocking.distributed_query_host_name AS blocking_node_name, + waiting.distributed_query_host_port AS waiting_node_port, + blocking.distributed_query_host_port AS blocking_node_port +FROM + unique_global_wait_edges +JOIN + citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) +JOIN + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql new file mode 100644 index 000000000..2ae40374a --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql @@ -0,0 +1,44 @@ +SET search_path = 'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +citus_dist_stat_activity AS +( + SELECT * FROM citus_dist_stat_activity +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() +), +citus_dist_stat_activity_with_node_id AS +( + SELECT + citus_dist_stat_activity.*, (CASE citus_dist_stat_activity.distributed_query_host_name WHEN 'coordinator_host' THEN 0 ELSE pg_dist_node.nodeid END) as initiator_node_id + FROM + citus_dist_stat_activity LEFT JOIN pg_dist_node + ON + citus_dist_stat_activity.distributed_query_host_name = pg_dist_node.nodename AND + citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport +) +SELECT + waiting.pid AS waiting_pid, + blocking.pid AS blocking_pid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.initiator_node_id AS waiting_node_id, + blocking.initiator_node_id AS blocking_node_id, + waiting.distributed_query_host_name AS waiting_node_name, + blocking.distributed_query_host_name AS blocking_node_name, + waiting.distributed_query_host_port AS waiting_node_port, + blocking.distributed_query_host_port AS blocking_node_port +FROM + unique_global_wait_edges +JOIN + citus_dist_stat_activity_with_node_id waiting ON 
(unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) +JOIN + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_worker_stat_activity/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/11.0-1.sql new file mode 100644 index 000000000..6f585b2e8 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/11.0-1.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_worker_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_worker_stat_activity$$; + +COMMENT ON FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8) +IS 'returns distributed transaction activity on shards of distributed tables'; diff --git a/src/backend/distributed/sql/udfs/citus_worker_stat_activity/latest.sql b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/latest.sql new file mode 100644 index 000000000..6f585b2e8 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_worker_stat_activity/latest.sql @@ -0,0 +1,19 @@ +DROP FUNCTION IF EXISTS pg_catalog.citus_worker_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT 
backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
+RETURNS SETOF RECORD
+LANGUAGE C STRICT AS 'MODULE_PATHNAME',
+$$citus_worker_stat_activity$$;
+
+COMMENT ON FUNCTION pg_catalog.citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int,
+                                                          OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name,
+                                                          OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET,
+                                                          OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz,
+                                                          OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text,
+                                                          OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text, OUT global_pid int8)
+IS 'returns distributed transaction activity on shards of distributed tables';
diff --git a/src/backend/distributed/sql/udfs/get_all_active_transactions/11.0-1.sql b/src/backend/distributed/sql/udfs/get_all_active_transactions/11.0-1.sql
new file mode 100644
index 000000000..636abb0dd
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/get_all_active_transactions/11.0-1.sql
@@ -0,0 +1,12 @@
+DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions();
+CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
+                                                                   OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
+                                                                   OUT global_pid int8)
+RETURNS SETOF RECORD
+LANGUAGE C STRICT AS 'MODULE_PATHNAME',
+$$get_all_active_transactions$$;
+
+COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
+                                                            OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
+                                                            OUT global_pid int8)
+IS 'returns transaction information for all Citus initiated transactions';
diff --git a/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql b/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql
new file mode 100644
index 000000000..636abb0dd
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql
@@ -0,0 +1,12 @@
+DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions();
+CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
+                                                                   OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
+                                                                   OUT global_pid int8)
+RETURNS SETOF RECORD
+LANGUAGE C STRICT AS 'MODULE_PATHNAME',
+$$get_all_active_transactions$$;
+
+COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
+                                                            OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
+                                                            OUT global_pid int8)
+IS 'returns transaction information for all Citus initiated transactions';
diff --git a/src/backend/distributed/sql/udfs/get_global_active_transactions/11.0-1.sql b/src/backend/distributed/sql/udfs/get_global_active_transactions/11.0-1.sql
new file mode 100644
index 000000000..c0831b521
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/get_global_active_transactions/11.0-1.sql
@@ -0,0 +1,9 @@
+DROP FUNCTION IF EXISTS pg_catalog.get_global_active_transactions();
+CREATE OR REPLACE FUNCTION 
pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) + RETURNS SETOF RECORD + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$get_global_active_transactions$$; +COMMENT ON FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) + IS 'returns transaction information for all Citus initiated transactions from each node of the cluster'; diff --git a/src/backend/distributed/sql/udfs/get_global_active_transactions/latest.sql b/src/backend/distributed/sql/udfs/get_global_active_transactions/latest.sql new file mode 100644 index 000000000..c0831b521 --- /dev/null +++ b/src/backend/distributed/sql/udfs/get_global_active_transactions/latest.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.get_global_active_transactions(); +CREATE OR REPLACE FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) + RETURNS SETOF RECORD + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$get_global_active_transactions$$; +COMMENT ON FUNCTION pg_catalog.get_global_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) + IS 'returns transaction information for all Citus initiated transactions from each node of the cluster'; diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index c76a80460..4e037e30c 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -15,6 +15,9 @@ #include "distributed/pg_version_constants.h" #include "miscadmin.h" +#include "unistd.h" + +#include "safe_lib.h" #include "funcapi.h" #include "access/htup_details.h" @@ -43,7 +46,7 @@ #define GET_ACTIVE_TRANSACTION_QUERY "SELECT * FROM get_all_active_transactions();" -#define ACTIVE_TRANSACTION_COLUMN_COUNT 6 +#define ACTIVE_TRANSACTION_COLUMN_COUNT 7 /* * Each backend's data reside in the shared memory @@ -78,6 +81,7 @@ typedef struct BackendManagementShmemData static void StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescriptor); +static uint64 GenerateGlobalPID(void); static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static BackendManagementShmemData *backendManagementShmemData = NULL; @@ -86,6 +90,7 @@ static BackendData *MyBackendData = NULL; static void BackendManagementShmemInit(void); static size_t BackendManagementShmemSize(void); +static void UnSetGlobalPID(void); PG_FUNCTION_INFO_V1(assign_distributed_transaction_id); @@ -315,6 +320,7 @@ get_global_active_transactions(PG_FUNCTION_ARGS) values[3] = ParseBoolField(result, rowIndex, 3); values[4] = ParseIntField(result, rowIndex, 4); values[5] = ParseTimestampTzField(result, rowIndex, 5); + values[6] = ParseIntField(result, rowIndex, 6); tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls); } @@ -384,8 +390,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto SpinLockAcquire(¤tBackend->mutex); - /* we're only interested in 
backends initiated by Citus */
-	if (currentBackend->citusBackend.initiatorNodeIdentifier < 0)
+	if (currentBackend->globalPID == INVALID_CITUS_INTERNAL_BACKEND_GPID)
 	{
 		SpinLockRelease(&currentBackend->mutex);
 		continue;
@@ -427,6 +432,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto
 		values[3] = !coordinatorOriginatedQuery;
 		values[4] = UInt64GetDatum(transactionNumber);
 		values[5] = TimestampTzGetDatum(transactionIdTimestamp);
+		values[6] = UInt64GetDatum(currentBackend->globalPID);
 
 		tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls);
 
@@ -631,6 +637,7 @@ InitializeBackendData(void)
 
 	/* zero out the backend data */
 	UnSetDistributedTransactionId();
+	UnSetGlobalPID();
 
 	UnlockBackendSharedMemory();
 }
@@ -664,6 +671,24 @@ UnSetDistributedTransactionId(void)
 }
 
 
+/*
+ * UnSetGlobalPID resets the global pid for the current backend.
+ */
+static void
+UnSetGlobalPID(void)
+{
+	/* backend does not exist if the extension is not created */
+	if (MyBackendData)
+	{
+		SpinLockAcquire(&MyBackendData->mutex);
+
+		MyBackendData->globalPID = 0;
+
+		SpinLockRelease(&MyBackendData->mutex);
+	}
+}
+
+
 /*
  * LockBackendSharedMemory is a simple wrapper around LWLockAcquire on the
  * shared memory lock.
@@ -780,6 +805,109 @@ MarkCitusInitiatedCoordinatorBackend(void)
 }
 
 
+/*
+ * AssignGlobalPID assigns a global process id for the current backend.
+ * If this is a Citus initiated backend, which means it is part of a distributed
+ * query, then this function assigns the global pid extracted from the application
+ * name. If not, this function assigns a newly generated global pid.
+ */
+void
+AssignGlobalPID(void)
+{
+	uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID;
+
+	if (!IsCitusInternalBackend())
+	{
+		globalPID = GenerateGlobalPID();
+	}
+	else
+	{
+		globalPID = ExtractGlobalPID(application_name);
+	}
+
+	SpinLockAcquire(&MyBackendData->mutex);
+	MyBackendData->globalPID = globalPID;
+	SpinLockRelease(&MyBackendData->mutex);
+}
+
+
+/*
+ * GetGlobalPID returns the global process id of the current backend.
+ */
+uint64
+GetGlobalPID(void)
+{
+	uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID;
+
+	if (MyBackendData)
+	{
+		SpinLockAcquire(&MyBackendData->mutex);
+		globalPID = MyBackendData->globalPID;
+		SpinLockRelease(&MyBackendData->mutex);
+	}
+
+	return globalPID;
+}
+
+
+/*
+ * GenerateGlobalPID generates the global process id for the current backend.
+ */
+static uint64
+GenerateGlobalPID(void)
+{
+	/*
+	 * We try to create a human readable global pid that consists of node id and process id.
+	 * By multiplying the node id by 10^10 and adding the pid we generate a number whose
+	 * smallest 10 digits represent the pid and whose remaining digits are the node id.
+	 *
+	 * Both node id and pid are 32 bit. We use 10^10 to fit all possible pids. Some very large
+	 * node ids might cause overflow, but even for applications that add around 50 nodes every
+	 * day it would take about 100K years to reach that point. So we are not worried.
+	 */
+	return (((uint64) GetLocalNodeId()) * 10000000000) + getpid();
+}
+
+
+/*
+ * ExtractGlobalPID extracts the global process id from the application name.
+ * If the application name is not compatible with Citus' internal application
+ * name prefix, it returns 0.
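+ *
+ * For example, with CITUS_APPLICATION_NAME_PREFIX being "citus_internal gpid=",
+ * an application_name of "citus_internal gpid=10000000001" yields the global
+ * pid 10000000001 (node id 1, pid 1), whereas a plain "psql" yields 0.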
+ */
+uint64
+ExtractGlobalPID(char *applicationName)
+{
+	/* does application name exist */
+	if (!applicationName)
+	{
+		return INVALID_CITUS_INTERNAL_BACKEND_GPID;
+	}
+
+	/* we create our own copy of the application name in case the original changes */
+	char *applicationNameCopy = pstrdup(applicationName);
+
+	uint64 prefixLength = strlen(CITUS_APPLICATION_NAME_PREFIX);
+
+	/* does the application name start with Citus' application name prefix */
+	if (strncmp(applicationNameCopy, CITUS_APPLICATION_NAME_PREFIX, prefixLength) != 0)
+	{
+		return INVALID_CITUS_INTERNAL_BACKEND_GPID;
+	}
+
+	/* are the remaining characters of the application name all digits */
+	uint64 numberOfRemainingChars = strlen(applicationNameCopy) - prefixLength;
+	if (numberOfRemainingChars <= 0 ||
+		!strisdigit_s(applicationNameCopy + prefixLength, numberOfRemainingChars))
+	{
+		return INVALID_CITUS_INTERNAL_BACKEND_GPID;
+	}
+
+	char *globalPIDString = &applicationNameCopy[prefixLength];
+	uint64 globalPID = strtoul(globalPIDString, NULL, 10);
+
+	return globalPID;
+}
+
+
 /*
  * CurrentDistributedTransactionNumber returns the transaction number of the
  * current distributed transaction. The caller must make sure a distributed
diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c
index d85959925..04014693e 100644
--- a/src/backend/distributed/transaction/citus_dist_stat_activity.c
+++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c
@@ -108,7 +108,7 @@
 * showing the initiator_node_id we expand it to initiator_node_host and
 * initiator_node_port.
 */
-#define CITUS_DIST_STAT_ACTIVITY_QUERY_COLS 23
+#define CITUS_DIST_STAT_ACTIVITY_QUERY_COLS 24
 #define CITUS_DIST_STAT_ADDITIONAL_COLS 3
 #define CITUS_DIST_STAT_ACTIVITY_COLS \
 	CITUS_DIST_STAT_ACTIVITY_QUERY_COLS + CITUS_DIST_STAT_ADDITIONAL_COLS
@@ -147,11 +147,12 @@ SELECT \
 	pg_stat_activity.backend_xid, \
 	pg_stat_activity.backend_xmin, \
 	pg_stat_activity.query, \
-	pg_stat_activity.backend_type \
+	pg_stat_activity.backend_type, \
+	dist_txs.global_pid \
 FROM \
 	pg_stat_activity \
 INNER JOIN \
-	get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \
+	get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_pid) \
 ON pg_stat_activity.pid = dist_txs.process_id \
 WHERE \
 	dist_txs.worker_query = false;"
@@ -181,14 +182,15 @@ SELECT \
 	pg_stat_activity.backend_xid, \
 	pg_stat_activity.backend_xmin, \
 	pg_stat_activity.query, \
-	pg_stat_activity.backend_type \
+	pg_stat_activity.backend_type, \
+	dist_txs.global_id \
 FROM \
 	pg_stat_activity \
 LEFT JOIN \
-	get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp) \
+	get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_id) \
 ON pg_stat_activity.pid = dist_txs.process_id \
 WHERE \
-	pg_stat_activity.application_name = 'citus_internal' \
+	pg_stat_activity.application_name SIMILAR TO 'citus_internal gpid=\\d+' \
 	AND \
 	pg_stat_activity.query NOT ILIKE '%stat_activity%';"
@@ -223,6 +225,7 @@ typedef struct CitusDistStat
 	TransactionId backend_xmin;
 	text *query;
 	text *backend_type;
+	uint64 global_pid;
 } CitusDistStat;
 
 
@@ -501,6 +504,7 @@ ParseCitusDistStat(PGresult *result, int64 rowIndex)
 	citusDistStat->backend_xmin = ParseXIDField(result, rowIndex, 20);
 	citusDistStat->query = ParseTextField(result, rowIndex, 21);
 	citusDistStat->backend_type = ParseTextField(result, rowIndex, 22);
+	citusDistStat->global_pid = ParseIntField(result, rowIndex, 23);
 
 	return citusDistStat;
 }
@@ -688,6 +692,7 @@ HeapTupleToCitusDistStat(HeapTuple result, TupleDesc rowDescriptor)
 	citusDistStat->backend_xmin = ParseXIDFieldFromHeapTuple(result, rowDescriptor, 21);
 	citusDistStat->query = ParseTextFieldFromHeapTuple(result, rowDescriptor, 22);
 	citusDistStat->backend_type = ParseTextFieldFromHeapTuple(result, rowDescriptor, 23);
+	citusDistStat->global_pid = ParseIntFieldFromHeapTuple(result, rowDescriptor, 24);
 
 	return citusDistStat;
 }
@@ -1098,6 +1103,8 @@ ReturnCitusDistStats(List *citusStatsList, FunctionCallInfo fcinfo)
 			nulls[25] = true;
 		}
 
+		values[26] = UInt64GetDatum(citusDistStat->global_pid);
+
 		tuplestore_putvalues(tupleStore, tupleDesc, values, nulls);
 	}
 }
diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h
index c68bfcf16..bf9a57fd1 100644
--- a/src/include/distributed/backend_data.h
+++ b/src/include/distributed/backend_data.h
@@ -50,6 +50,7 @@ typedef struct BackendData
 	Oid userId;
 	slock_t mutex;
 	bool cancelledDueToDeadlock;
+	uint64 globalPID;
 	CitusInitiatedBackend citusBackend;
 	DistributedTransactionId transactionId;
 } BackendData;
@@ -63,6 +64,9 @@ extern void UnlockBackendSharedMemory(void);
 extern void UnSetDistributedTransactionId(void);
 extern void AssignDistributedTransactionId(void);
 extern void MarkCitusInitiatedCoordinatorBackend(void);
+extern void AssignGlobalPID(void);
+extern uint64 GetGlobalPID(void);
+extern uint64 ExtractGlobalPID(char *applicationName);
 extern void GetBackendDataForProc(PGPROC *proc, BackendData *result);
 extern void CancelTransactionDueToDeadlock(PGPROC *proc);
 extern bool MyBackendGotCancelledDueToDeadlock(bool clearState);
@@ -73,4 +77,6 @@ extern int GetAllActiveClientBackendCount(void);
 extern void IncrementClientBackendCounter(void);
 extern void DecrementClientBackendCounter(void);
 
+#define INVALID_CITUS_INTERNAL_BACKEND_GPID 0
+
 #endif /* BACKEND_DATA_H */
diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h
index 721617474..ad575cfe5 100644
--- a/src/include/distributed/connection_management.h
+++ b/src/include/distributed/connection_management.h
@@ -29,7 +29,7 @@
 #define ERROR_BUFFER_SIZE 256
 
 /* application name used for internal connections in Citus */
-#define CITUS_APPLICATION_NAME "citus_internal"
+#define CITUS_APPLICATION_NAME_PREFIX "citus_internal gpid="
 
 /* application name used for internal connections in rebalancer */
 #define CITUS_REBALANCER_NAME "citus_rebalancer"
diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h
index 6dac101af..46ba72a49 100644
--- a/src/include/distributed/metadata_cache.h
+++ b/src/include/distributed/metadata_cache.h
@@ -165,6 +165,7 @@ extern CitusTableCacheEntry * LookupCitusTableCacheEntry(Oid relationId);
 extern DistObjectCacheEntry * LookupDistObjectCacheEntry(Oid classid, Oid objid,
 														 int32 objsubid);
 extern int32 GetLocalGroupId(void);
+extern int32 GetLocalNodeId(void);
 extern void CitusTableCacheFlushInvalidatedEntries(void);
 extern Oid LookupShardRelationFromCatalog(int64 shardId, bool missing_ok);
 extern List * ShardPlacementListIncludingOrphanedPlacements(uint64 shardId);
diff --git 
a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index 6284107d2..9c44269a3 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -225,8 +225,8 @@ SELECT count(*) FROM single_replicatated WHERE key = 100; RESET client_min_messages; -- verify get_global_active_transactions works when a timeout happens on a connection -SELECT get_global_active_transactions(); - get_global_active_transactions +SELECT * FROM get_global_active_transactions() WHERE transaction_number != 0; + datid | process_id | initiator_node_identifier | worker_query | transaction_number | transaction_stamp | global_pid --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/isolation_distributed_transaction_id.out b/src/test/regress/expected/isolation_distributed_transaction_id.out index 1e41f8957..3d44f0069 100644 --- a/src/test/regress/expected/isolation_distributed_transaction_id.out +++ b/src/test/regress/expected/isolation_distributed_transaction_id.out @@ -94,7 +94,8 @@ step s1-verify-current-xact-is-on-worker: get_current_transaction_id() as xact, run_command_on_workers($$ SELECT row(initiator_node_identifier, transaction_number) - FROM get_all_active_transactions(); + FROM get_all_active_transactions() + WHERE transaction_number != 0; $$) as remote ORDER BY remote.nodeport ASC; diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index 9d94470ec..d5c4765b8 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -35,8 +35,8 @@ step s2-begin-insert: step s3-as-admin: -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- @@ -51,8 +51,8 @@ count step s3-as-user-1: -- User should only be able to see its own transactions SET ROLE test_user_1; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- @@ -67,8 +67,8 @@ count step s3-as-readonly: -- Other user should not see transactions SET ROLE test_readonly; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- @@ -83,8 +83,8 @@ count step s3-as-monitor: -- Monitor should see all transactions SET ROLE test_monitor; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + 
SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_global_pid.out b/src/test/regress/expected/isolation_global_pid.out new file mode 100644 index 000000000..19e055079 --- /dev/null +++ b/src/test/regress/expected/isolation_global_pid.out @@ -0,0 +1,145 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-start-session-level-connection s1-worker-begin s1-worker-select s2-coordinator-citus_dist_stat_activity s2-coordinator-citus_worker_stat_activity s1-worker-commit s1-stop-session-level-connection +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-worker-begin: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-worker-select: + SELECT run_commands_on_session_level_connection_to_node('SET citus.enable_local_execution TO off; SET citus.force_max_query_parallelization TO ON; SELECT * FROM dist_table'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-coordinator-citus_dist_stat_activity: + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'; + +?column? +--------------------------------------------------------------------- +t +(1 row) + +step s2-coordinator-citus_worker_stat_activity: + SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + ORDER BY 1; + +query +--------------------------------------------------------------------- +SELECT a, b FROM public.dist_table_12345000 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345001 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345002 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345003 dist_table WHERE true +(4 rows) + +step s1-worker-commit: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-stop-session-level-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-coordinator-begin s1-coordinator-select s2-coordinator-citus_dist_stat_activity s2-coordinator-citus_worker_stat_activity s2-coordinator-get_all_active_transactions s2-coordinator-get_global_active_transactions s1-coordinator-commit +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-coordinator-begin: + BEGIN; + +step s1-coordinator-select: + SET citus.enable_local_execution TO off; + SET citus.force_max_query_parallelization TO ON; + SELECT * FROM dist_table; + 
+a|b +--------------------------------------------------------------------- +(0 rows) + +step s2-coordinator-citus_dist_stat_activity: + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'; + +?column? +--------------------------------------------------------------------- +t +(1 row) + +step s2-coordinator-citus_worker_stat_activity: + SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + ORDER BY 1; + +query +--------------------------------------------------------------------- +SELECT a, b FROM public.dist_table_12345000 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345001 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345002 dist_table WHERE true +SELECT a, b FROM public.dist_table_12345003 dist_table WHERE true +(4 rows) + +step s2-coordinator-get_all_active_transactions: + SELECT count(*) FROM get_all_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ); + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-coordinator-get_global_active_transactions: + SELECT count(*) FROM get_global_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + AND transaction_number != 0; + +count +--------------------------------------------------------------------- + 5 +(1 row) + +step s1-coordinator-commit: + COMMIT; + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index d28a6b714..04e427ce6 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -136,8 +136,8 @@ step s1-update-ref-table: step s2-active-transactions: -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index cb3b113e2..26d300e84 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -36,7 +36,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; @@ -73,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: 
must be owner of table test ROLLBACK; @@ -85,7 +85,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); ERROR: must be owner of table test ROLLBACK; @@ -99,7 +99,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -112,6 +112,54 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; 1 (1 row) +ROLLBACK; +-- application_name with incorrect gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to 'citus_internal gpid=not a correct gpid'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction +ROLLBACK; +-- application_name with empty gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to 'citus_internal gpid='; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction +ROLLBACK; +-- empty application_name +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to ''; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction +ROLLBACK; +-- application_name with incorrect prefix +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + assign_distributed_transaction_id +--------------------------------------------------------------------- + +(1 row) + + SET application_name to 'citus gpid=10000000001'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- fails because there is no X distribution method BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -121,7 +169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; @@ -133,7 +181,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 
'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ERROR: column "non_existing_col" of relation "test_2" does not exist ROLLBACK; @@ -145,7 +193,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ERROR: relation cannot be NULL ROLLBACK; @@ -157,7 +205,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ERROR: Metadata syncing is only allowed for valid colocation id values. ROLLBACK; @@ -169,7 +217,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; @@ -181,7 +229,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata @@ -200,7 +248,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata @@ -219,7 +267,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ERROR: Distribution column cannot be NULL for relation "test_2" @@ -252,7 +300,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -268,7 +316,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); ERROR: could not find valid entry for shard xxxxx @@ -298,7 +346,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: role "non_existing_user" does not exist ROLLBACK; @@ -329,7 +377,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 
'col_1', 0, 's'); ERROR: Reference or local tables cannot have distribution columns ROLLBACK; @@ -341,7 +389,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ERROR: Metadata syncing is only allowed for known replication models. ROLLBACK; @@ -353,7 +401,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ERROR: Local or references tables can only have 's' or 't' as the replication model. ROLLBACK; @@ -368,7 +416,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -387,7 +435,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -402,7 +450,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -417,7 +465,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); citus_internal_add_partition_metadata --------------------------------------------------------------------- @@ -445,7 +493,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); citus_internal_update_relation_colocation --------------------------------------------------------------------- @@ -461,7 +509,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -476,7 +524,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -491,7 +539,7 @@ BEGIN TRANSACTION 
ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) @@ -506,7 +554,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) @@ -521,7 +569,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) @@ -536,7 +584,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), @@ -554,7 +602,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -569,7 +617,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) @@ -583,7 +631,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) @@ -598,7 +646,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -614,7 +662,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) @@ -635,7 +683,7 @@ 
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TABLE publication_test_table(id int); CREATE PUBLICATION publication_test FOR TABLE publication_test_table; @@ -653,7 +701,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE FUNCTION distribution_test_function(int) RETURNS int AS $$ SELECT $1 $$ @@ -671,7 +719,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TYPE distributed_test_type AS (a int, b int); SET ROLE metadata_sync_helper_role; @@ -690,7 +738,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse UPDATE pg_dist_partition SET partmethod = 'X'; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) @@ -709,7 +757,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) @@ -736,7 +784,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text), @@ -767,7 +815,7 @@ BEGIN; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; @@ -779,7 +827,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_3'::regclass, 1420009::bigint, 't'::"char", '21'::text, '30'::text), @@ -806,7 +854,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) @@ -821,7 +869,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), @@ -837,7 +885,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH 
shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) @@ -858,7 +906,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) @@ -880,7 +928,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) @@ -895,7 +943,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, 1::int, -10)) @@ -910,7 +958,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1430100, 1, 0::bigint, 1::int, 10)) @@ -925,7 +973,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 10, 0::bigint, 1::int, 1500000)) @@ -940,7 +988,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES ( 1420000, 1, 0::bigint, 123123123::int, 1500000)) @@ -968,7 +1016,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -984,7 +1032,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000)) @@ -999,7 +1047,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -1040,7 +1088,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); citus_internal_update_relation_colocation 
--------------------------------------------------------------------- @@ -1057,7 +1105,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); ERROR: Node with group id 1014 for shard placement xxxxx does not exist @@ -1070,7 +1118,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); ERROR: Active placement for shard xxxxx is not found on group:14 @@ -1083,7 +1131,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 0 @@ -1096,7 +1144,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 213123123123 @@ -1109,7 +1157,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); ERROR: must be owner of table super_user_table @@ -1122,7 +1170,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -1131,7 +1179,7 @@ ERROR: must be owner of table super_user_table ROLLBACK; -- the user is only allowed to delete shards in a distributed transaction BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -1146,7 +1194,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) @@ -1173,7 +1221,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) @@ -1207,7 +1255,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the repmodel -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' @@ -1222,7 +1270,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables
colocated fails UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}' @@ -1237,7 +1285,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' @@ -1252,7 +1300,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' @@ -1270,7 +1318,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); citus_internal_add_partition_metadata @@ -1293,7 +1341,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); citus_internal_add_partition_metadata diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index f0700c734..5df2e9a6d 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -63,6 +63,7 @@ test: shared_connection_waits test: isolation_cancellation test: isolation_undistribute_table test: isolation_fix_partition_shard_index_names +test: isolation_global_pid # Rebalancer test: isolation_blocking_move_single_shard_commands diff --git a/src/test/regress/spec/isolation_distributed_transaction_id.spec b/src/test/regress/spec/isolation_distributed_transaction_id.spec index 372cd8f78..f928918ed 100644 --- a/src/test/regress/spec/isolation_distributed_transaction_id.spec +++ b/src/test/regress/spec/isolation_distributed_transaction_id.spec @@ -54,7 +54,8 @@ step "s1-verify-current-xact-is-on-worker" get_current_transaction_id() as xact, run_command_on_workers($$ SELECT row(initiator_node_identifier, transaction_number) - FROM get_all_active_transactions(); + FROM get_all_active_transactions() + WHERE transaction_number != 0; $$) as remote ORDER BY remote.nodeport ASC; } diff --git a/src/test/regress/spec/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec index da0c4553c..fd69c0ac4 100644 --- a/src/test/regress/spec/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -71,32 +71,32 @@ session "s3" step "s3-as-admin" { -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } step "s3-as-user-1" { -- User should only be able to see its own transactions SET ROLE test_user_1; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM 
get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } step "s3-as-readonly" { -- Other user should not see transactions SET ROLE test_readonly; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } step "s3-as-monitor" { -- Monitor should see all transactions SET ROLE test_monitor; - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } permutation "s1-grant" "s1-begin-insert" "s2-begin-insert" "s3-as-admin" "s3-as-user-1" "s3-as-readonly" "s3-as-monitor" "s1-commit" "s2-commit" diff --git a/src/test/regress/spec/isolation_global_pid.spec b/src/test/regress/spec/isolation_global_pid.spec new file mode 100644 index 000000000..4306d2640 --- /dev/null +++ b/src/test/regress/spec/isolation_global_pid.spec @@ -0,0 +1,96 @@ +#include "isolation_mx_common.include.spec" + +setup +{ + SET citus.next_shard_id TO 12345000; + CREATE TABLE dist_table (a INT, b INT); + SELECT create_distributed_table('dist_table', 'a', shard_count:=4); +} + +teardown +{ + DROP TABLE dist_table; + SELECT citus_internal.restore_isolation_tester_func(); +} + +session "s1" + +step "s1-coordinator-begin" +{ + BEGIN; +} + +step "s1-coordinator-select" +{ + SET citus.enable_local_execution TO off; + SET citus.force_max_query_parallelization TO ON; + SELECT * FROM dist_table; +} + +step "s1-coordinator-commit" +{ + COMMIT; +} + +step "s1-start-session-level-connection" +{ + + SELECT start_session_level_connection_to_node('localhost', 57637); +} + +step "s1-worker-begin" +{ + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); +} + +step "s1-worker-select" +{ + SELECT run_commands_on_session_level_connection_to_node('SET citus.enable_local_execution TO off; SET citus.force_max_query_parallelization TO ON; SELECT * FROM dist_table'); +} + +step "s1-worker-commit" +{ + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); +} + +step "s1-stop-session-level-connection" +{ + SELECT stop_session_level_connection_to_node(); +} + +session "s2" + +step "s2-coordinator-citus_dist_stat_activity" +{ + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'; +} + +step "s2-coordinator-citus_worker_stat_activity" +{ + SELECT query FROM citus_worker_stat_activity() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + ORDER BY 1; +} + +step "s2-coordinator-get_all_active_transactions" +{ + SELECT count(*) FROM get_all_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ); +} + +step "s2-coordinator-get_global_active_transactions" +{ + SELECT count(*) FROM get_global_active_transactions() WHERE global_pid IN ( + SELECT global_pid FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' + ) + AND transaction_number != 0; +} + + +// worker - coordinator +permutation 
"s1-start-session-level-connection" "s1-worker-begin" "s1-worker-select" "s2-coordinator-citus_dist_stat_activity" "s2-coordinator-citus_worker_stat_activity" "s1-worker-commit" "s1-stop-session-level-connection" + +// coordinator - coordinator +permutation "s1-coordinator-begin" "s1-coordinator-select" "s2-coordinator-citus_dist_stat_activity" "s2-coordinator-citus_worker_stat_activity" "s2-coordinator-get_all_active_transactions" "s2-coordinator-get_global_active_transactions" "s1-coordinator-commit" diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index 752f26399..12f11bef0 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -106,8 +106,8 @@ step "s2-sleep" step "s2-active-transactions" { -- Admin should be able to see all transactions - SELECT count(*) FROM get_all_active_transactions(); - SELECT count(*) FROM get_global_active_transactions(); + SELECT count(*) FROM get_all_active_transactions() WHERE transaction_number != 0; + SELECT count(*) FROM get_global_active_transactions() WHERE transaction_number != 0; } // we disable the daemon during the regression tests in order to get consistent results diff --git a/src/test/regress/sql/failure_connection_establishment.sql b/src/test/regress/sql/failure_connection_establishment.sql index 43cb97b86..5029d40b7 100644 --- a/src/test/regress/sql/failure_connection_establishment.sql +++ b/src/test/regress/sql/failure_connection_establishment.sql @@ -124,7 +124,7 @@ SELECT count(*) FROM single_replicatated WHERE key = 100; RESET client_min_messages; -- verify get_global_active_transactions works when a timeout happens on a connection -SELECT get_global_active_transactions(); +SELECT * FROM get_global_active_transactions() WHERE transaction_number != 0; -- tests for connectivity checks SET client_min_messages TO ERROR; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 22e337443..8e7f13ce5 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -28,7 +28,7 @@ ROLLBACK; -- but we are on the coordinator, so still not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; @@ -67,14 +67,14 @@ SET search_path TO metadata_sync_helpers; -- owner of the table test BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- we do not own the relation BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); ROLLBACK; @@ -83,50 +83,78 @@ CREATE TABLE test_2(col_1 int, col_2 int); CREATE 
TABLE test_3(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT count(*) FROM pg_dist_partition WHERE logicalrelid = 'metadata_sync_helpers.test_2'::regclass; ROLLBACK; +-- application_name with incorrect gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to 'citus_internal gpid=not a correct gpid'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + +-- application_name with empty gpid +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to 'citus_internal gpid='; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + +-- empty application_name +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to ''; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + +-- application_name with incorrect prefix +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SET application_name to 'citus gpid=10000000001'; + SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); +ROLLBACK; + -- fails because there is no X distribution method BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- fails because the column does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ROLLBACK; -- fails because we do not allow NULL parameters BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ROLLBACK; -- fails because colocationId cannot be negative BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ROLLBACK; -- fails because there is no X replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET
application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); @@ -135,7 +163,7 @@ ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); @@ -144,7 +172,7 @@ ROLLBACK; -- hash distributed table cannot have NULL distribution key BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ROLLBACK; @@ -165,14 +193,14 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- should throw error even if we skip the checks, there are no such nodes BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); ROLLBACK; @@ -189,7 +217,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; @@ -207,21 +235,21 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE test_ref(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ROLLBACK; -- non-valid replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET 
application_name to 'citus_internal'; + SET
application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ROLLBACK; -- not-matching replication model for reference table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ROLLBACK; @@ -231,7 +259,7 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE super_user_table(col_1 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); COMMIT; @@ -244,7 +272,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -254,7 +282,7 @@ ROLLBACK; -- the user is only allowed to add a shard for a table which is in pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -264,7 +292,7 @@ ROLLBACK; -- ok, now add the table to the pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); @@ -273,14 +301,14 @@ COMMIT; -- we can update to a non-existing colocation group (e.g., colocate_with:=none) BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); ROLLBACK; -- invalid shard ids are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text,
'-1610612737'::text)) @@ -290,7 +318,7 @@ ROLLBACK; -- invalid storage types are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) @@ -300,7 +328,7 @@ ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) @@ -310,7 +338,7 @@ ROLLBACK; -- non-integer shard ranges are not allowed BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) @@ -320,7 +348,7 @@ ROLLBACK; -- shardMinValue should be smaller than shardMaxValue BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) @@ -330,7 +358,7 @@ ROLLBACK; -- we do not allow overlapping shards for the same table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), @@ -344,7 +372,7 @@ ROLLBACK; -- check with non-existing object type BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -354,7 +382,7 @@ ROLLBACK; -- check the sanity of distributionArgumentIndex and colocationId BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, 
distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) @@ -363,7 +391,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) @@ -373,7 +401,7 @@ ROLLBACK; -- check with non-existing object BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) @@ -384,7 +412,7 @@ ROLLBACK; -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) @@ -397,7 +425,7 @@ ROLLBACK; -- which is known how to distribute BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TABLE publication_test_table(id int); CREATE PUBLICATION publication_test FOR TABLE publication_test_table; @@ -412,7 +440,7 @@ ROLLBACK; -- Show that citus_internal_add_object_metadata checks the privileges BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE FUNCTION distribution_test_function(int) RETURNS int AS $$ SELECT $1 $$ @@ -427,7 +455,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse CREATE TYPE distributed_test_type AS (a int, b int); @@ -443,7 +471,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse UPDATE pg_dist_partition SET partmethod = 'X'; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) @@ -457,7 +485,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to
'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) @@ -475,7 +503,7 @@ SET search_path TO metadata_sync_helpers; -- now, add few shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '11'::text, '20'::text), @@ -491,14 +519,14 @@ COMMIT; -- we cannot mark these two tables colocated because they are not colocated BEGIN; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_3'::regclass, 1420009::bigint, 't'::"char", '21'::text, '30'::text), @@ -512,7 +540,7 @@ COMMIT; -- shardMin/MaxValues should be NULL for reference tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) @@ -522,7 +550,7 @@ ROLLBACK; -- reference tables cannot have multiple shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), @@ -533,7 +561,7 @@ ROLLBACK; -- finally, add a shard for reference tables BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) @@ -546,7 +574,7 @@ SET search_path TO metadata_sync_helpers; -- and a shard for the superuser table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, 
shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) @@ -561,7 +589,7 @@ SET search_path TO metadata_sync_helpers; -- shard does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) @@ -571,7 +599,7 @@ ROLLBACK; -- invalid placementid BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, 1::int, -10)) @@ -581,7 +609,7 @@ ROLLBACK; -- non-existing shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1430100, 1, 0::bigint, 1::int, 10)) @@ -591,7 +619,7 @@ ROLLBACK; -- invalid shard state BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 10, 0::bigint, 1::int, 1500000)) @@ -601,7 +629,7 @@ ROLLBACK; -- non-existing node with non-existing node-id 123123123 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES ( 1420000, 1, 0::bigint, 123123123::int, 1500000)) @@ -625,7 +653,7 @@ END; $$ language plpgsql; -- fails because we ingest more placements for the same shards to the same worker node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -636,7 +664,7 @@ ROLLBACK; -- shard is not owned by us BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420007, 1, 0::bigint, get_node_id(), 1500000)) @@ -646,7 +674,7 @@ ROLLBACK; -- sucessfully add placements BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET 
application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS (VALUES (1420000, 1, 0::bigint, get_node_id(), 1500000), @@ -667,7 +695,7 @@ COMMIT; -- we should be able to colocate both tables now BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); ROLLBACK; @@ -676,7 +704,7 @@ ROLLBACK; -- fails because we are trying to update it to non-existing node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); COMMIT; @@ -684,7 +712,7 @@ COMMIT; -- fails because the source node doesn't contain the shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); COMMIT; @@ -692,7 +720,7 @@ COMMIT; -- fails because shard does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); COMMIT; @@ -700,7 +728,7 @@ COMMIT; -- fails because none-existing shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); COMMIT; @@ -708,7 +736,7 @@ COMMIT; -- fails because we do not own the shard BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); COMMIT; @@ -716,7 +744,7 @@ COMMIT; -- the user only allowed to delete their own shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -725,7 +753,7 @@ ROLLBACK; -- the user only allowed to delete shards in a distributed transaction BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) @@ -735,7 +763,7 @@ ROLLBACK; 
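-- The boilerplate repeated above is the handshake every citus_internal_*
-- helper requires before it accepts metadata writes: first take a distributed
-- transaction id, then claim to be an internal Citus backend via
-- application_name (which now carries a gpid). A minimal sketch of the
-- accepted pattern, consolidated here for reference and reusing only
-- identifiers that already appear in this file:
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
	SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
	SET application_name to 'citus_internal gpid=10000000001';
	SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
ROLLBACK;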
-- the user cannot delete non-existing shards BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) @@ -750,7 +778,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT count(*) FROM pg_dist_placement WHERE shardid = 1420000; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) @@ -767,7 +795,7 @@ ROLLBACK; SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the repmodel -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' @@ -778,7 +806,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the vartype of table from int to bigint -- so that making two tables colocated fails UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}' @@ -788,7 +816,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' @@ -798,7 +826,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; -- with an ugly trick, update the partmethod of the table to not-valid -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' @@ -812,7 +840,7 @@ CREATE TABLE test_6(int_col int, text_col text); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); @@ -828,7 +856,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; CREATE TABLE test_8(int_col int, text_col text COLLATE "caseinsensitive"); SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SET application_name to 'citus_internal'; + SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); SELECT 
citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's');

From 1e3c8e34c04a6d7b1d27529a581f03c692809a3e Mon Sep 17 00:00:00 2001
From: Teja Mupparti
Date: Wed, 26 Jan 2022 22:01:02 -0800
Subject: [PATCH 24/28] Allow create_distributed_function() on a function owned
 by an extension

Implement #5649
Allow create_distributed_function() on functions owned by extensions

1) Only update pg_dist_object, and do not propagate CREATE FUNCTION.

2) Ensure the corresponding extension is in pg_dist_object.

3) Verify that, if dependencies exist on the function, they resolve to
   the extension.

4) Impact on node-scaling: We build a list of DDL commands based on all
   objects in pg_dist_object. We need to omit the DDLs for the
   extension-function, as it will get propagated by virtue of the
   extension creation.

5) Extra checks for functions coming from extensions, so that changes
   are not propagated via DDL commands even though the function is
   marked as distributed in pg_dist_object
---
 .../distributed/commands/dependencies.c       |   9 +
 src/backend/distributed/commands/function.c   | 118 +++++++----
 .../expected/distributed_functions.out        |   3 +-
 .../expected/propagate_extension_commands.out | 196 +++++++++++++++++-
 .../propagate_extension_commands_1.out        | 196 +++++++++++++++++-
 .../sql/propagate_extension_commands.sql      | 115 +++++++++-
 6 files changed, 593 insertions(+), 44 deletions(-)

diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c
index f82ddf065..9ee50ed47 100644
--- a/src/backend/distributed/commands/dependencies.c
+++ b/src/backend/distributed/commands/dependencies.c
@@ -396,6 +396,15 @@ ReplicateAllObjectsToNodeCommandList(const char *nodeName, int nodePort)
 	ObjectAddress *dependency = NULL;
 	foreach_ptr(dependency, dependencies)
 	{
+		if (IsObjectAddressOwnedByExtension(dependency, NULL))
+		{
+			/*
+			 * we expect extension-owned objects to be created as a result
+			 * of the extension being created.
+			 */
+			continue;
+		}
+
 		ddlCommands = list_concat(ddlCommands,
 								  GetDependencyCreateDDLCommands(dependency));
 	}
diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c
index 497a32dbb..ab03fe77a 100644
--- a/src/backend/distributed/commands/function.c
+++ b/src/backend/distributed/commands/function.c
@@ -85,7 +85,6 @@ static ObjectAddress FunctionToObjectAddress(ObjectType objectType,
 											 ObjectWithArgs *objectWithArgs,
 											 bool missing_ok);
 static void ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt);
-static void ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress);
 static char * quote_qualified_func_name(Oid funcOid);
 static void DistributeFunctionWithDistributionArgument(RegProcedure funcOid,
 													   char *distributionArgumentName,
@@ -101,6 +100,9 @@ static void DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid
 static void DistributeFunctionColocatedWithReferenceTable(const
 														  ObjectAddress *functionAddress);
+static void EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress,
+													const ObjectAddress extensionAddress,
+													char *distributionArgumentName);
 PG_FUNCTION_INFO_V1(create_distributed_function);
@@ -127,6 +129,7 @@ create_distributed_function(PG_FUNCTION_ARGS)
 	char *colocateWithTableName = NULL;
 	bool *forceDelegationAddress = NULL;
 	bool forceDelegation = false;
+	ObjectAddress extensionAddress = { 0 };
 	/* if called on NULL input, error out */
 	if (funcOid == InvalidOid)
@@ -187,22 +190,35 @@ create_distributed_function(PG_FUNCTION_ARGS)
 	EnsureFunctionOwner(funcOid);
 	ObjectAddressSet(functionAddress, ProcedureRelationId, funcOid);
-	ErrorIfFunctionDependsOnExtension(&functionAddress);
 	/*
-	 * when we allow propagation within a transaction block we should make sure to only
-	 * allow this in sequential mode
+	 * If the function is owned by an extension, only update pg_dist_object
+	 * and do not propagate the CREATE FUNCTION. The function will be
+	 * created by virtue of the extension creation.
 	 */
-	EnsureSequentialModeForFunctionDDL();
+	if (IsObjectAddressOwnedByExtension(&functionAddress, &extensionAddress))
+	{
+		EnsureExtensionFunctionCanBeDistributed(functionAddress, extensionAddress,
+												distributionArgumentName);
+	}
+	else
+	{
+		/*
+		 * when we allow propagation within a transaction block we should make sure
+		 * to only allow this in sequential mode.
+		 */
+		EnsureSequentialModeForFunctionDDL();
-	EnsureDependenciesExistOnAllNodes(&functionAddress);
+		EnsureDependenciesExistOnAllNodes(&functionAddress);
-	const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
-	const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
-	initStringInfo(&ddlCommand);
-	appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC,
-					 createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC);
-	SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(), ddlCommand.data);
+		const char *createFunctionSQL = GetFunctionDDLCommand(funcOid, true);
+		const char *alterFunctionOwnerSQL = GetFunctionAlterOwnerCommand(funcOid);
+		initStringInfo(&ddlCommand);
+		appendStringInfo(&ddlCommand, "%s;%s;%s;%s", DISABLE_METADATA_SYNC,
+						 createFunctionSQL, alterFunctionOwnerSQL, ENABLE_METADATA_SYNC);
+		SendCommandToWorkersAsUser(NON_COORDINATOR_NODES, CurrentUserName(),
+								   ddlCommand.data);
+	}
 	MarkObjectDistributed(&functionAddress);
@@ -2013,33 +2029,6 @@ ErrorIfUnsupportedAlterFunctionStmt(AlterFunctionStmt *stmt)
 }
-/*
- * ErrorIfFunctionDependsOnExtension functions depending on extensions should raise an
- * error informing the user why they can't be distributed.
- */
-static void
-ErrorIfFunctionDependsOnExtension(const ObjectAddress *functionAddress)
-{
-	/* captures the extension address during lookup */
-	ObjectAddress extensionAddress = { 0 };
-
-	if (IsObjectAddressOwnedByExtension(functionAddress, &extensionAddress))
-	{
-		char *functionName =
-			getObjectIdentity_compat(functionAddress, /* missingOk: */ false);
-		char *extensionName =
-			getObjectIdentity_compat(&extensionAddress, /* missingOk: */ false);
-		ereport(ERROR, (errmsg("unable to create a distributed function from functions "
-							   "owned by an extension"),
-						errdetail("Function \"%s\" has a dependency on extension \"%s\". "
-								  "Functions depending on an extension cannot be "
-								  "distributed. Create the function by creating the "
-								  "extension on the workers.", functionName,
-								  extensionName)));
-	}
-}
-
-
 /* returns the quoted qualified name of a given function oid */
 static char *
 quote_qualified_func_name(Oid funcOid)
@@ -2048,3 +2037,54 @@ quote_qualified_func_name(Oid funcOid)
 					get_namespace_name(get_func_namespace(funcOid)),
 					get_func_name(funcOid));
 }
+
+
+/*
+ * EnsureExtensionFunctionCanBeDistributed checks that the dependent objects
+ * (including the extension) exist on all nodes and creates them if not. In
+ * addition, it checks that a distribution argument is passed.
+ */
+static void
+EnsureExtensionFunctionCanBeDistributed(const ObjectAddress functionAddress,
+										const ObjectAddress extensionAddress,
+										char *distributionArgumentName)
+{
+	if (CitusExtensionObject(&extensionAddress))
+	{
+		/*
+		 * Citus extension is a special case. It's the extension that
+		 * provides the 'distributed capabilities' in the first place.
+		 * Trying to distribute its own function(s) doesn't make sense.
+		 */
+		ereport(ERROR, (errmsg("Citus extension functions(%s) "
+							   "cannot be distributed.",
+							   get_func_name(functionAddress.objectId))));
+	}
+
+	/*
+	 * Distributing functions from extensions has the most benefit when
+	 * a distribution argument is specified.
+	 */
+	if (distributionArgumentName == NULL)
+	{
+		ereport(ERROR, (errmsg("Extension functions(%s) "
+							   "without distribution argument "
+							   "are not supported.",
+							   get_func_name(functionAddress.objectId))));
+	}
+
+	/*
+	 * Ensure the corresponding extension is in pg_dist_object.
+	 * Functions owned by an extension depend internally on that extension,
+	 * hence EnsureDependenciesExistOnAllNodes() creates the extension, which
+	 * in turn creates the function, so we don't have to create it ourselves
+	 * as we do for non-extension functions.
+	 */
+	ereport(DEBUG1, (errmsg("Extension(%s) owning the "
+							"function(%s) is not distributed, "
+							"attempting to propagate the extension",
+							get_extension_name(extensionAddress.objectId),
+							get_func_name(functionAddress.objectId))));
+
+	EnsureDependenciesExistOnAllNodes(&functionAddress);
+}
diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out
index 2ab760014..5068cf709 100644
--- a/src/test/regress/expected/distributed_functions.out
+++ b/src/test/regress/expected/distributed_functions.out
@@ -469,8 +469,7 @@ ALTER FUNCTION eq(macaddr,macaddr) DEPENDS ON EXTENSION citus;
 ERROR: distrtibuted functions are not allowed to depend on an extension
 DETAIL: Function "function_tests.eq(pg_catalog.macaddr,pg_catalog.macaddr)" is already distributed. Functions from extensions are expected to be created on the workers by the extension they depend on.
 SELECT create_distributed_function('pg_catalog.citus_drop_trigger()');
-ERROR: unable to create a distributed function from functions owned by an extension
-DETAIL: Function "pg_catalog.citus_drop_trigger()" has a dependency on extension "citus". Functions depending on an extension cannot be distributed. Create the function by creating the extension on the workers.
+ERROR: Citus extension functions(citus_drop_trigger) cannot be distributed.
 DROP FUNCTION eq(macaddr,macaddr);
 -- call should fail as function should have been dropped
 SELECT * FROM run_command_on_workers($$SELECT function_tests.eq('0123456789ab','ba9876543210');$$) ORDER BY 1,2;
diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out
index b7e0618e3..71966ed39 100644
--- a/src/test/regress/expected/propagate_extension_commands.out
+++ b/src/test/regress/expected/propagate_extension_commands.out
@@ -398,10 +398,34 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port);
 1
(1 row)

+-- Test extension function incorrect distribution argument
+CREATE TABLE test_extension_function(col varchar);
+CREATE EXTENSION seg;
+-- Missing distribution argument
+SELECT create_distributed_function('seg_in(cstring)');
+ERROR: Extension functions(seg_in) without distribution argument are not supported.
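-- (Sketch for contrast, not an assertion about the output at this point: the
-- accepted invocation, exercised further down once test_extension_function
-- has been distributed, supplies both a distribution argument and a table to
-- colocate with.)
--   SELECT create_distributed_function('seg_in(cstring)', '$1',
--                                      colocate_with := 'test_extension_function');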
+-- Missing colocation argument +SELECT create_distributed_function('seg_in(cstring)', '$1'); +ERROR: cannot distribute the function "seg_in" since there is no table to colocate with +HINT: Provide a distributed table via "colocate_with" option to create_distributed_function() +-- Incorrect distribution argument +SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function'); +ERROR: cannot distribute the function "seg_in" since the distribution argument is not valid +HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() +-- Colocated table is not distributed +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); +ERROR: relation test_extension_function is not distributed +DROP EXTENSION seg; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + -- now, create a type that depends on another type, which -- finally depends on an extension BEGIN; - SET citus.shard_replication_factor TO 1; CREATE EXTENSION seg; CREATE EXTENSION isn; CREATE TYPE test_type AS (a int, b seg); @@ -419,9 +443,39 @@ BEGIN; create_reference_table --------------------------------------------------------------------- +(1 row) + + -- Distribute an extension-function + SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + (1 row) COMMIT; +-- Check the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) +(1 row) + -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? 
@@ -443,5 +497,145 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname (localhost,57638,t,2) (2 rows) +-- Check the pg_dist_object on the both nodes +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg CASCADE; +-- Recheck the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- +(0 rows) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +-- Distribute an extension-function where extension is not in pg_dist_object +SET citus.enable_ddl_propagation TO false; +CREATE EXTENSION seg; +SET citus.enable_ddl_propagation TO true; +-- Check the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- Recheck the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg; +DROP TABLE test_extension_function; +-- Test extension function altering distribution argument +BEGIN; +SET citus.shard_replication_factor = 1; +CREATE TABLE test_extension_function(col1 float8[], col2 
float8[]); +SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE EXTENSION cube; +SELECT create_distributed_function('cube(float8[], float8[])', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 1 +(1 row) + +ROLLBACK; -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/expected/propagate_extension_commands_1.out b/src/test/regress/expected/propagate_extension_commands_1.out index 99b8ef3a2..482ee5580 100644 --- a/src/test/regress/expected/propagate_extension_commands_1.out +++ b/src/test/regress/expected/propagate_extension_commands_1.out @@ -397,10 +397,34 @@ SELECT 1 from master_remove_node('localhost', :worker_2_port); 1 (1 row) +-- Test extension function incorrect distribution argument +CREATE TABLE test_extension_function(col varchar); +CREATE EXTENSION seg; +-- Missing distribution argument +SELECT create_distributed_function('seg_in(cstring)'); +ERROR: Extension functions(seg_in) without distribution argument are not supported. 
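-- (Aside: the pg_dist_object row that the checks below reach via a pg_proc
-- join can also be fetched directly; a sketch using a regprocedure lookup:)
--   SELECT distribution_argument_index, colocationid
--   FROM citus.pg_dist_object
--   WHERE classid = 'pg_proc'::regclass
--     AND objid = 'seg_in(cstring)'::regprocedure;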
+-- Missing colocation argument +SELECT create_distributed_function('seg_in(cstring)', '$1'); +ERROR: cannot distribute the function "seg_in" since there is no table to colocate with +HINT: Provide a distributed table via "colocate_with" option to create_distributed_function() +-- Incorrect distribution argument +SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function'); +ERROR: cannot distribute the function "seg_in" since the distribution argument is not valid +HINT: Either provide a valid function argument name or a valid "$paramIndex" to create_distributed_function() +-- Colocated table is not distributed +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); +ERROR: relation test_extension_function is not distributed +DROP EXTENSION seg; +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + -- now, create a type that depends on another type, which -- finally depends on an extension BEGIN; - SET citus.shard_replication_factor TO 1; CREATE EXTENSION seg; CREATE EXTENSION isn; CREATE TYPE test_type AS (a int, b seg); @@ -418,9 +442,39 @@ BEGIN; create_reference_table --------------------------------------------------------------------- +(1 row) + + -- Distribute an extension-function + SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + (1 row) COMMIT; +-- Check the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) +(1 row) + -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); ?column? 
@@ -442,5 +496,145 @@ SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname (localhost,57638,t,2) (2 rows) +-- Check the pg_dist_object on the both nodes +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg CASCADE; +-- Recheck the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- +(0 rows) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +-- Distribute an extension-function where extension is not in pg_dist_object +SET citus.enable_ddl_propagation TO false; +CREATE EXTENSION seg; +SET citus.enable_ddl_propagation TO true; +-- Check the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,0) + (localhost,57638,t,0) +(2 rows) + +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- Recheck the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + distributedfunction +--------------------------------------------------------------------- + seg_in +(1 row) + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,1) + (localhost,57638,t,1) +(2 rows) + +DROP EXTENSION seg; +DROP TABLE test_extension_function; +-- Test extension function altering distribution argument +BEGIN; +SET citus.shard_replication_factor = 1; +CREATE TABLE test_extension_function(col1 float8[], col2 
float8[]); +SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE EXTENSION cube; +SELECT create_distributed_function('cube(float8[], float8[])', '$1', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function'); + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + distribution_argument_index +--------------------------------------------------------------------- + 1 +(1 row) + +ROLLBACK; -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; diff --git a/src/test/regress/sql/propagate_extension_commands.sql b/src/test/regress/sql/propagate_extension_commands.sql index 500dc00b8..6356d71e3 100644 --- a/src/test/regress/sql/propagate_extension_commands.sql +++ b/src/test/regress/sql/propagate_extension_commands.sql @@ -227,10 +227,25 @@ SET search_path TO "extension'test"; -- remove the node, we'll add back again SELECT 1 from master_remove_node('localhost', :worker_2_port); +-- Test extension function incorrect distribution argument +CREATE TABLE test_extension_function(col varchar); +CREATE EXTENSION seg; +-- Missing distribution argument +SELECT create_distributed_function('seg_in(cstring)'); +-- Missing colocation argument +SELECT create_distributed_function('seg_in(cstring)', '$1'); +-- Incorrect distribution argument +SELECT create_distributed_function('seg_in(cstring)', '$2', colocate_with:='test_extension_function'); +-- Colocated table is not distributed +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); +DROP EXTENSION seg; + +SET citus.shard_replication_factor TO 1; +SELECT create_distributed_table('test_extension_function', 'col', colocate_with := 'none'); + -- now, create a type that depends on another type, which -- finally depends on an extension BEGIN; - SET citus.shard_replication_factor TO 1; CREATE EXTENSION seg; CREATE EXTENSION isn; CREATE TYPE test_type AS (a int, b seg); @@ -243,8 +258,25 @@ BEGIN; CREATE TABLE t3 (a int, b test_type_3); SELECT create_reference_table('t3'); + -- Distribute an extension-function + SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); COMMIT; +-- Check the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + -- add the node back SELECT 1 from master_add_node('localhost', :worker_2_port); @@ -252,5 
+284,86 @@ SELECT 1 from master_add_node('localhost', :worker_2_port); SELECT count(*) FROM citus.pg_dist_object WHERE objid IN (SELECT oid FROM pg_extension WHERE extname IN ('seg', 'isn')); SELECT run_command_on_workers($$SELECT count(*) FROM pg_extension WHERE extname IN ('seg', 'isn')$$); +-- Check the pg_dist_object on the both nodes +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + +DROP EXTENSION seg CASCADE; + +-- Recheck the pg_dist_object +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + +-- Distribute an extension-function where extension is not in pg_dist_object +SET citus.enable_ddl_propagation TO false; +CREATE EXTENSION seg; +SET citus.enable_ddl_propagation TO true; + +-- Check the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); + +SELECT create_distributed_function('seg_in(cstring)', '$1', 'test_extension_function'); + +-- Recheck the extension in pg_dist_object +SELECT count(*) FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_extension WHERE extname = 'seg'); + +SELECT pg_proc.proname as DistributedFunction +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; + +SELECT run_command_on_workers($$ +SELECT count(*) +FROM citus.pg_dist_object, pg_proc +WHERE pg_proc.proname = 'seg_in' and +pg_proc.oid = citus.pg_dist_object.objid and +classid = 'pg_proc'::regclass; +$$); +DROP EXTENSION seg; +DROP TABLE test_extension_function; + + +-- Test extension function altering distribution argument +BEGIN; +SET citus.shard_replication_factor = 1; +CREATE TABLE test_extension_function(col1 float8[], col2 float8[]); +SELECT create_distributed_table('test_extension_function', 'col1', colocate_with := 'none'); +CREATE EXTENSION cube; + +SELECT create_distributed_function('cube(float8[], float8[])', '$1', 'test_extension_function'); +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); + +SELECT create_distributed_function('cube(float8[], float8[])', '$2', 'test_extension_function'); +SELECT distribution_argument_index FROM citus.pg_dist_object WHERE classid = 'pg_catalog.pg_proc'::pg_catalog.regclass AND +objid = (SELECT oid FROM pg_proc WHERE prosrc = 'cube_a_f8_f8'); +ROLLBACK; + -- drop the schema and all the objects DROP SCHEMA "extension'test" CASCADE; From 1c30f61a70b1af6940264102fb0fe8fbf4556395 Mon Sep 17 00:00:00 2001 From: Onder Kalaci Date: Tue, 8 Feb 2022 11:49:35 +0100 Subject: [PATCH 25/28] Prevent 
citus.node_conninfo from using "application_name"

With https://github.com/citusdata/citus/pull/5657, Citus uses a fixed
application_name while connecting to remote nodes for internal purposes.
This means that we cannot allow users to override it via
citus.node_conninfo.
---
 src/backend/distributed/shared_library_init.c      | 1 -
 src/test/regress/expected/node_conninfo_reload.out | 4 ++++
 src/test/regress/sql/node_conninfo_reload.sql      | 3 +++
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index 70d3f549c..5dbc7d3e1 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -1984,7 +1984,6 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source)
 {
 	/* this array _must_ be kept in an order usable by bsearch */
 	const char *allowedConninfoKeywords[] = {
-		"application_name",
 		"connect_timeout",
 #if defined(ENABLE_GSS) && defined(ENABLE_SSPI)
 		"gsslib",
diff --git a/src/test/regress/expected/node_conninfo_reload.out b/src/test/regress/expected/node_conninfo_reload.out
index 932610bc9..d7b853226 100644
--- a/src/test/regress/expected/node_conninfo_reload.out
+++ b/src/test/regress/expected/node_conninfo_reload.out
@@ -76,6 +76,10 @@ select count(*) from test where a = 0;
(1 row)

 ALTER SYSTEM SET citus.node_conninfo = 'sslmode=doesnotexist';
+-- we cannot set application_name
+ALTER SYSTEM SET citus.node_conninfo = 'application_name=XXX';
+ERROR: invalid value for parameter "citus.node_conninfo": "application_name=XXX"
+DETAIL: Prohibited conninfo keyword detected: application_name
 BEGIN;
 -- Should still work (no SIGHUP yet);
 select count(*) from test where a = 0;
diff --git a/src/test/regress/sql/node_conninfo_reload.sql b/src/test/regress/sql/node_conninfo_reload.sql
index 5a4f3c75b..3790f2c98 100644
--- a/src/test/regress/sql/node_conninfo_reload.sql
+++ b/src/test/regress/sql/node_conninfo_reload.sql
@@ -32,6 +32,9 @@ show citus.node_conninfo;
 select count(*) from test where a = 0;

 ALTER SYSTEM SET citus.node_conninfo = 'sslmode=doesnotexist';
+-- we cannot set application_name
+ALTER SYSTEM SET citus.node_conninfo = 'application_name=XXX';
+
 BEGIN;
 -- Should still work (no SIGHUP yet);
 select count(*) from test where a = 0;

From d0711ea9b4e21678d572abb6c0ed8053ddcc313f Mon Sep 17 00:00:00 2001
From: Marco Slot
Date: Mon, 7 Feb 2022 22:34:41 +0100
Subject: [PATCH 26/28] Delegate function calls in FROM outside of transaction
 block

---
 .../planner/function_call_delegation.c        | 10 ---
 .../expected/forcedelegation_functions.out    | 13 +---
 .../multi_mx_function_call_delegation.out     | 64 +++++++++++++++----
 .../multi_mx_function_call_delegation_0.out   | 64 +++++++++++++++----
 .../regress/sql/forcedelegation_functions.sql |  3 +-
 .../sql/multi_mx_function_call_delegation.sql | 13 +++-
 6 files changed, 121 insertions(+), 46 deletions(-)

diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c
index 716c5357c..9ea275035 100644
--- a/src/backend/distributed/planner/function_call_delegation.c
+++ b/src/backend/distributed/planner/function_call_delegation.c
@@ -313,16 +313,6 @@ TryToDelegateFunctionCall(DistributedPlanningContext *planContext)
 		return NULL;
 	}

-	if (fromFuncExpr && !IsMultiStatementTransaction())
-	{
-		/*
-		 * For now, let's not push the function from the FROM clause unless it's in a
-		 * multistatement transaction with the forceDelegation flag ON.
- */ - ereport(DEBUG2, (errmsg("function from the FROM clause is not pushed"))); - return NULL; - } - /* dissuade the planner from trying a generic plan with parameters */ (void) expression_tree_walker((Node *) funcExpr->args, contain_param_walker, &walkerParamContext); diff --git a/src/test/regress/expected/forcedelegation_functions.out b/src/test/regress/expected/forcedelegation_functions.out index ad3b6cb8e..abfcaced8 100644 --- a/src/test/regress/expected/forcedelegation_functions.out +++ b/src/test/regress/expected/forcedelegation_functions.out @@ -1162,18 +1162,11 @@ SELECT * FROM forcepushdown_schema.test_subquery ORDER BY 1; (5 rows) -- Query with targetList greater than 1 --- Function from FROM clause is not delegated outside of a BEGIN (for now) +-- Function from FROM clause is delegated outside of a BEGIN SELECT 1,2,3 FROM select_data(100); -DEBUG: generating subplan XXX_1 for subquery SELECT data FROM forcepushdown_schema.test_subquery WHERE (data OPERATOR(pg_catalog.=) 100) -CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data = - (SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)" -PL/pgSQL function select_data(integer) line XX at SQL statement -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT result FROM forcepushdown_schema.test_subquery WHERE (data OPERATOR(pg_catalog.=) (SELECT intermediate_result.data FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(data integer))) -CONTEXT: SQL statement "SELECT result FROM forcepushdown_schema.test_subquery WHERE data = - (SELECT data FROM forcepushdown_schema.test_subquery WHERE data = a)" -PL/pgSQL function select_data(integer) line XX at SQL statement +DEBUG: pushing down the function call NOTICE: Result: -1 -CONTEXT: PL/pgSQL function select_data(integer) line XX at RAISE +DETAIL: from localhost:xxxxx ?column? | ?column? | ?column? 
--------------------------------------------------------------------- 1 | 2 | 3 diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index d48f001bf..8192ee35c 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -630,18 +630,6 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment (1 row) -- test forms we don't distribute -select * from mx_call_func(2, 0); -DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT ((3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))))::integer -CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment - y ---------------------------------------------------------------------- - 29 -(1 row) - select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: PL/pgSQL assignment "y := y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" @@ -671,6 +659,58 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment 29 | 27 (1 row) +-- regular call in FROM can be pushed down +select * from mx_call_func(2, 0); +DEBUG: pushing down the function call + y +--------------------------------------------------------------------- + 28 +(1 row) + +-- prepared statement with 6 invocations to trigger generic plan +prepare call_func(int, int) as select $1 from mx_call_func($1, $2); +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? 
+--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + -- we do not delegate the call, but do push down the query -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; diff --git a/src/test/regress/expected/multi_mx_function_call_delegation_0.out b/src/test/regress/expected/multi_mx_function_call_delegation_0.out index 06a7b320d..9125847b4 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation_0.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation_0.out @@ -630,18 +630,6 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment (1 row) -- test forms we don't distribute -select * from mx_call_func(2, 0); -DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment -DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT (3 OPERATOR(pg_catalog.+) (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum bigint))) -CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" -PL/pgSQL function mx_call_func(integer,integer) line XX at assignment - y ---------------------------------------------------------------------- - 29 -(1 row) - select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; DEBUG: generating subplan XXX_1 for subquery SELECT sum((t1.val OPERATOR(pg_catalog.+) t2.val)) AS sum FROM (multi_mx_function_call_delegation.mx_call_dist_table_1 t1 JOIN multi_mx_function_call_delegation.mx_call_dist_table_2 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.id))) CONTEXT: SQL statement "SELECT y + (select sum(t1.val + t2.val) from multi_mx_function_call_delegation.mx_call_dist_table_1 t1 join multi_mx_function_call_delegation.mx_call_dist_table_2 t2 on t1.id = t2.id)" @@ -671,6 +659,58 @@ PL/pgSQL function mx_call_func(integer,integer) line XX at assignment 29 | 27 (1 row) +-- regular call in FROM can be pushed down +select * from mx_call_func(2, 0); +DEBUG: pushing down the function call + y +--------------------------------------------------------------------- + 28 +(1 row) + +-- prepared statement with 6 invocations to trigger generic plan +prepare call_func(int, int) as select $1 from mx_call_func($1, $2); +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? 
+--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + +execute call_func(2, 0); +DEBUG: pushing down the function call + ?column? +--------------------------------------------------------------------- + 2 +(1 row) + -- we do not delegate the call, but do push down the query -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; diff --git a/src/test/regress/sql/forcedelegation_functions.sql b/src/test/regress/sql/forcedelegation_functions.sql index 77b171fc1..6f62c41d5 100644 --- a/src/test/regress/sql/forcedelegation_functions.sql +++ b/src/test/regress/sql/forcedelegation_functions.sql @@ -606,8 +606,9 @@ SELECT * FROM forcepushdown_schema.test_subquery ORDER BY 1; -- Query with targetList greater than 1 --- Function from FROM clause is not delegated outside of a BEGIN (for now) +-- Function from FROM clause is delegated outside of a BEGIN SELECT 1,2,3 FROM select_data(100); + BEGIN; -- Function from FROM clause is delegated SELECT 1,2,3 FROM select_data(100); diff --git a/src/test/regress/sql/multi_mx_function_call_delegation.sql b/src/test/regress/sql/multi_mx_function_call_delegation.sql index 206969456..0efcea922 100644 --- a/src/test/regress/sql/multi_mx_function_call_delegation.sql +++ b/src/test/regress/sql/multi_mx_function_call_delegation.sql @@ -274,10 +274,21 @@ select mx_call_func((select x + 1 from mx_call_add(3, 4) x), 2); select mx_call_func(floor(random())::int, 2); -- test forms we don't distribute -select * from mx_call_func(2, 0); select mx_call_func(2, 0) where mx_call_func(0, 2) = 0; select mx_call_func(2, 0), mx_call_func(0, 2); +-- regular call in FROM can be pushed down +select * from mx_call_func(2, 0); + +-- prepared statement with 6 invocations to trigger generic plan +prepare call_func(int, int) as select $1 from mx_call_func($1, $2); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); +execute call_func(2, 0); + -- we do not delegate the call, but do push down the query -- that result in remote execution from workers select mx_call_func(id, 0) from mx_call_dist_table_1; From 76b63a307b3f88297e393ec1d6a74f381a8dc780 Mon Sep 17 00:00:00 2001 From: Ahmet Gedemenli Date: Fri, 28 Jan 2022 13:49:12 +0300 Subject: [PATCH 27/28] Propagate create/drop schema commands --- .../commands/distribute_object_ops.c | 15 +- src/backend/distributed/commands/schema.c | 250 +++++++++++----- .../deparser/deparse_schema_stmts.c | 95 ++++++ src/include/distributed/commands.h | 3 + src/include/distributed/deparser.h | 2 + .../expected/citus_local_table_triggers.out | 4 +- .../regress/expected/columnar_truncate.out | 7 + .../expected/distributed_collations.out | 10 - .../distributed_collations_conflict.out | 7 - .../expected/distributed_functions.out | 7 - .../distributed_functions_conflict.out | 7 - .../expected/distributed_procedure.out | 14 - .../regress/expected/distributed_types.out | 14 - .../expected/distributed_types_conflict.out | 7 - .../distributed_types_xact_add_enum_value.out | 7 - .../expected/drop_partitioned_table.out | 8 - .../expected/failure_add_disable_node.out | 8 - 
...ure_create_distributed_table_non_empty.out | 9 +- .../failure_create_reference_table.out | 2 + .../regress/expected/failure_create_table.out | 2 + .../expected/fkeys_between_local_ref.out | 2 + .../expected/grant_on_schema_propagation.out | 37 +-- ...lation_ensure_dependency_activate_node.out | 270 +----------------- .../expected/isolation_extension_commands.out | 10 +- .../expected/metadata_sync_helpers.out | 32 +-- .../expected/multi_deparse_function.out | 23 +- .../multi_fix_partition_shard_index_names.out | 7 - .../multi_foreign_key_relation_graph.out | 2 + src/test/regress/expected/multi_multiuser.out | 10 - .../expected/multi_mx_create_table.out | 6 - src/test/regress/expected/multi_mx_ddl.out | 7 - .../expected/multi_mx_schema_support.out | 12 +- .../regress/expected/multi_schema_support.out | 33 ++- .../expected/multi_sequence_default.out | 91 +----- src/test/regress/expected/mx_regular_user.out | 10 - .../non_super_user_object_metadata.out | 6 +- .../expected/object_propagation_debug.out | 19 +- .../expected/subquery_prepared_statements.out | 7 - .../input/multi_alter_table_statements.source | 4 - src/test/regress/multi_1_schedule | 1 - .../multi_alter_table_statements.source | 11 - ...ation_ensure_dependency_activate_node.spec | 36 +-- .../sql/citus_local_table_triggers.sql | 4 +- src/test/regress/sql/columnar_truncate.sql | 1 + .../regress/sql/distributed_collations.sql | 15 - .../sql/distributed_collations_conflict.sql | 1 - .../regress/sql/distributed_functions.sql | 7 - .../sql/distributed_functions_conflict.sql | 1 - .../regress/sql/distributed_procedure.sql | 2 - src/test/regress/sql/distributed_types.sql | 2 - .../sql/distributed_types_conflict.sql | 1 - .../distributed_types_xact_add_enum_value.sql | 1 - .../regress/sql/drop_partitioned_table.sql | 2 - .../regress/sql/failure_add_disable_node.sql | 2 - ...ure_create_distributed_table_non_empty.sql | 3 +- .../sql/failure_create_reference_table.sql | 2 + src/test/regress/sql/failure_create_table.sql | 2 + .../regress/sql/fkeys_between_local_ref.sql | 2 + .../sql/grant_on_schema_propagation.sql | 12 +- .../regress/sql/metadata_sync_helpers.sql | 25 +- .../regress/sql/multi_deparse_function.sql | 13 +- .../multi_fix_partition_shard_index_names.sql | 1 - .../sql/multi_foreign_key_relation_graph.sql | 2 + src/test/regress/sql/multi_multiuser.sql | 12 - .../regress/sql/multi_mx_create_table.sql | 9 - src/test/regress/sql/multi_mx_ddl.sql | 1 - .../regress/sql/multi_mx_schema_support.sql | 5 +- src/test/regress/sql/multi_schema_support.sql | 14 +- .../regress/sql/multi_sequence_default.sql | 36 +-- src/test/regress/sql/mx_regular_user.sql | 12 - .../sql/non_super_user_object_metadata.sql | 2 +- .../regress/sql/object_propagation_debug.sql | 15 +- .../sql/subquery_prepared_statements.sql | 1 - 73 files changed, 449 insertions(+), 873 deletions(-) diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 755286ffb..380a83401 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -193,6 +193,14 @@ static DistributeObjectOps Any_CreateForeignServer = { .address = CreateForeignServerStmtObjectAddress, .markDistributed = true, }; +static DistributeObjectOps Any_CreateSchema = { + .deparse = DeparseCreateSchemaStmt, + .qualify = NULL, + .preprocess = PreprocessCreateSchemaStmt, + .postprocess = NULL, + .address = CreateSchemaStmtObjectAddress, + .markDistributed = true, +}; 
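Note on the behavior introduced here: with the Any_CreateSchema ops above, CREATE SCHEMA is deparsed and shipped to the workers wrapped in DDL-propagation guards, and inside a multi-statement transaction it is only propagated once the connection mode is sequential (see ShouldPropagateCreateSchemaStmt in schema.c below). A minimal SQL sketch of the resulting behavior — the schema names are hypothetical, and the worker-side SETs assume DISABLE_DDL_PROPAGATION/ENABLE_DDL_PROPAGATION expand to citus.enable_ddl_propagation toggles:

    -- a single-statement CREATE SCHEMA on the coordinator is propagated
    CREATE SCHEMA example_schema;

    -- in a transaction block, switch to sequential mode first;
    -- otherwise the schema is only created locally, not propagated
    BEGIN;
    SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
    CREATE SCHEMA example_schema_2;
    COMMIT;

    -- roughly what each worker then receives, guarded against re-propagation
    SET citus.enable_ddl_propagation TO 'off';
    CREATE SCHEMA example_schema;
    SET citus.enable_ddl_propagation TO 'on';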
 static DistributeObjectOps Any_CreateStatistics = {
 	.deparse = DeparseCreateStatisticsStmt,
 	.qualify = QualifyCreateStatisticsStmt,
@@ -538,7 +546,7 @@ static DistributeObjectOps Routine_Rename = {
 	.markDistributed = false,
 };
 static DistributeObjectOps Schema_Drop = {
-	.deparse = NULL,
+	.deparse = DeparseDropSchemaStmt,
 	.qualify = NULL,
 	.preprocess = PreprocessDropSchemaStmt,
 	.postprocess = NULL,
@@ -976,6 +984,11 @@ GetDistributeObjectOps(Node *node)
 			return &Any_CreatePolicy;
 		}
 
+		case T_CreateSchemaStmt:
+		{
+			return &Any_CreateSchema;
+		}
+
 		case T_CreateStatsStmt:
 		{
 			return &Any_CreateStatistics;
diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c
index fff2b1c94..adb26b5e7 100644
--- a/src/backend/distributed/commands/schema.c
+++ b/src/backend/distributed/commands/schema.c
@@ -40,8 +40,40 @@
 #include "utils/relcache.h"
 
 
+static ObjectAddress GetObjectAddressBySchemaName(char *schemaName, bool missing_ok);
 static List * FilterDistributedSchemas(List *schemas);
 static void EnsureSequentialModeForSchemaDDL(void);
+static bool SchemaHasDistributedTableWithFKey(char *schemaName);
+static bool ShouldPropagateCreateSchemaStmt(void);
+
+
+/*
+ * PreprocessCreateSchemaStmt is called during the planning phase for
+ * CREATE SCHEMA statements.
+ */
+List *
+PreprocessCreateSchemaStmt(Node *node, const char *queryString,
+						   ProcessUtilityContext processUtilityContext)
+{
+	if (!ShouldPropagateCreateSchemaStmt())
+	{
+		return NIL;
+	}
+
+	EnsureCoordinator();
+
+	EnsureSequentialModeForSchemaDDL();
+
+	/* deparse sql */
+	const char *sql = DeparseTreeNode(node);
+
+	/* to prevent recursion with MX we disable DDL propagation */
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+}
 
 
 /*
@@ -53,76 +85,54 @@
 PreprocessDropSchemaStmt(Node *node, const char *queryString,
 						 ProcessUtilityContext processUtilityContext)
 {
 	DropStmt *dropStatement = castNode(DropStmt, node);
-	Relation pgClass = NULL;
-	HeapTuple heapTuple = NULL;
-	SysScanDesc scanDescriptor = NULL;
-	ScanKeyData scanKey[1];
-	int scanKeyCount = 1;
-	Oid scanIndexId = InvalidOid;
-	bool useIndex = false;
+	Assert(dropStatement->removeType == OBJECT_SCHEMA);
 
-	if (dropStatement->behavior != DROP_CASCADE)
+	if (!ShouldPropagate())
 	{
 		return NIL;
 	}
 
-	Value *schemaValue = NULL;
-	foreach_ptr(schemaValue, dropStatement->objects)
+	EnsureCoordinator();
+
+	List *distributedSchemas = FilterDistributedSchemas(dropStatement->objects);
+
+	if (list_length(distributedSchemas) < 1)
 	{
-		const char *schemaString = strVal(schemaValue);
-		Oid namespaceOid = get_namespace_oid(schemaString, true);
-
-		if (namespaceOid == InvalidOid)
-		{
-			continue;
-		}
-
-		pgClass = table_open(RelationRelationId, AccessShareLock);
-
-		ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber,
-					F_OIDEQ, namespaceOid);
-		scanDescriptor = systable_beginscan(pgClass, scanIndexId, useIndex, NULL,
-											scanKeyCount, scanKey);
-
-		heapTuple = systable_getnext(scanDescriptor);
-		while (HeapTupleIsValid(heapTuple))
-		{
-			Form_pg_class relationForm = (Form_pg_class) GETSTRUCT(heapTuple);
-			char *relationName = NameStr(relationForm->relname);
-			Oid relationId = get_relname_relid(relationName, namespaceOid);
-
-			/* we're not interested in non-valid, non-distributed relations */
-			if (relationId == InvalidOid || !IsCitusTable(relationId))
-			{
-				heapTuple = systable_getnext(scanDescriptor);
-				continue;
-			}
-
-			if (IsCitusTableType(relationId, REFERENCE_TABLE))
-			{
-				/* prevent concurrent EnsureReferenceTablesExistOnAllNodes */
-				int colocationId = CreateReferenceTableColocationId();
-				LockColocationId(colocationId, ExclusiveLock);
-			}
-
-			/* invalidate foreign key cache if the table involved in any foreign key */
-			if (TableReferenced(relationId) || TableReferencing(relationId))
-			{
-				MarkInvalidateForeignKeyGraph();
-
-				systable_endscan(scanDescriptor);
-				table_close(pgClass, NoLock);
-				return NIL;
-			}
-
-			heapTuple = systable_getnext(scanDescriptor);
-		}
-
-		systable_endscan(scanDescriptor);
-		table_close(pgClass, NoLock);
+		return NIL;
 	}
 
-	return NIL;
+	EnsureSequentialModeForSchemaDDL();
+
+	Value *schemaVal = NULL;
+	foreach_ptr(schemaVal, distributedSchemas)
+	{
+		if (SchemaHasDistributedTableWithFKey(strVal(schemaVal)))
+		{
+			MarkInvalidateForeignKeyGraph();
+			break;
+		}
+	}
+
+	/*
+	 * We swap out the schemas in the statement so that it only contains the
+	 * distributed schemas before deparsing. We need to restore the original
+	 * list afterwards, as postgres will execute this statement locally, which
+	 * requires all schemas originally given by the user to be present.
+	 */
+	List *originalObjects = dropStatement->objects;
+
+	dropStatement->objects = distributedSchemas;
+
+	const char *sql = DeparseTreeNode(node);
+
+	dropStatement->objects = originalObjects;
+
+	/* to prevent recursion with MX we disable DDL propagation */
+	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
+								(void *) sql,
+								ENABLE_DDL_PROPAGATION);
+
+	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
 }
 
 
@@ -205,6 +215,19 @@ PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString,
 }
 
 
+/*
+ * CreateSchemaStmtObjectAddress returns the ObjectAddress of the schema that is
+ * the object of the CreateSchemaStmt. Errors if missing_ok is false.
+ */
+ObjectAddress
+CreateSchemaStmtObjectAddress(Node *node, bool missing_ok)
+{
+	CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
+
+	return GetObjectAddressBySchemaName(stmt->schemaname, missing_ok);
+}
+
+
 /*
  * AlterSchemaRenameStmtObjectAddress returns the ObjectAddress of the schema that is
  * the object of the RenameStmt. Errors if missing_ok is false.
@@ -215,7 +238,17 @@ AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok)
 	RenameStmt *stmt = castNode(RenameStmt, node);
 	Assert(stmt->renameType == OBJECT_SCHEMA);
 
-	const char *schemaName = stmt->subname;
+	return GetObjectAddressBySchemaName(stmt->subname, missing_ok);
+}
+
+
+/*
+ * GetObjectAddressBySchemaName returns the ObjectAddress of the schema with the
+ * given name. Errors out if the schema is not found and missing_ok is false.
+ */
+static ObjectAddress
+GetObjectAddressBySchemaName(char *schemaName, bool missing_ok)
+{
 	Oid schemaOid = get_namespace_oid(schemaName, missing_ok);
 
 	ObjectAddress address = { 0 };
@@ -282,9 +315,9 @@ EnsureSequentialModeForSchemaDDL(void)
 		ereport(ERROR, (errmsg("cannot create or modify schema because there was a "
 							   "parallel operation on a distributed table in the "
 							   "transaction"),
-						errdetail("When creating or altering a schema, Citus needs to "
-								  "perform all operations over a single connection per "
-								  "node to ensure consistency."),
+						errdetail("When creating, altering, or dropping a schema, Citus "
+								  "needs to perform all operations over a single "
+								  "connection per node to ensure consistency."),
 						errhint("Try re-running the transaction with "
 								"\"SET LOCAL citus.multi_shard_modify_mode TO "
 								"\'sequential\';\"")));
@@ -296,3 +329,88 @@ EnsureSequentialModeForSchemaDDL(void)
 							 "use only one connection for all future commands")));
 	SetLocalMultiShardModifyModeToSequential();
 }
+
+
+/*
+ * SchemaHasDistributedTableWithFKey takes a schema name and scans the relations within
+ * that schema. If any one of the relations has a foreign key relationship, it returns
+ * true. Returns false otherwise.
+ */
+static bool
+SchemaHasDistributedTableWithFKey(char *schemaName)
+{
+	ScanKeyData scanKey[1];
+	int scanKeyCount = 1;
+	Oid scanIndexId = InvalidOid;
+	bool useIndex = false;
+
+	Oid namespaceOid = get_namespace_oid(schemaName, true);
+
+	if (namespaceOid == InvalidOid)
+	{
+		return false;
+	}
+
+	Relation pgClass = table_open(RelationRelationId, AccessShareLock);
+
+	ScanKeyInit(&scanKey[0], Anum_pg_class_relnamespace, BTEqualStrategyNumber,
+				F_OIDEQ, namespaceOid);
+	SysScanDesc scanDescriptor = systable_beginscan(pgClass, scanIndexId, useIndex, NULL,
+													scanKeyCount, scanKey);
+
+	HeapTuple heapTuple = systable_getnext(scanDescriptor);
+	while (HeapTupleIsValid(heapTuple))
+	{
+		Form_pg_class relationForm = (Form_pg_class) GETSTRUCT(heapTuple);
+		char *relationName = NameStr(relationForm->relname);
+		Oid relationId = get_relname_relid(relationName, namespaceOid);
+
+		/* we're only interested in valid, distributed relations */
+		if (relationId == InvalidOid || !IsCitusTable(relationId))
+		{
+			heapTuple = systable_getnext(scanDescriptor);
+			continue;
+		}
+
+		/* return true if the table is involved in any foreign key */
+		if (TableReferenced(relationId) || TableReferencing(relationId))
+		{
+			systable_endscan(scanDescriptor);
+			table_close(pgClass, NoLock);
+			return true;
+		}
+
+		heapTuple = systable_getnext(scanDescriptor);
+	}
+
+	systable_endscan(scanDescriptor);
+	table_close(pgClass, NoLock);
+
+	return false;
+}
+
+
+/*
+ * ShouldPropagateCreateSchemaStmt is called only for CreateSchemaStmt nodes.
+ * It wraps the ShouldPropagate function, which is commonly used for all object
+ * types; additionally, it checks whether a multi-statement transaction is
+ * ongoing. For transaction blocks, we require sequential mode for CREATE
+ * SCHEMA statements; if Citus has not already switched to sequential mode,
+ * we don't propagate.
+ */
+static bool
+ShouldPropagateCreateSchemaStmt()
+{
+	if (!ShouldPropagate())
+	{
+		return false;
+	}
+
+	if (IsMultiStatementTransaction() &&
+		MultiShardConnectionType != SEQUENTIAL_CONNECTION)
+	{
+		return false;
+	}
+
+	return true;
+}
diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c
index 426146f56..f8c3d35a1 100644
--- a/src/backend/distributed/deparser/deparse_schema_stmts.c
+++ b/src/backend/distributed/deparser/deparse_schema_stmts.c
@@ -14,16 +14,47 @@
 #include "distributed/citus_ruleutils.h"
 #include "distributed/deparser.h"
+#include "distributed/listutils.h"
 #include "lib/stringinfo.h"
 #include "nodes/nodes.h"
 #include "utils/builtins.h"
 
+static void AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt);
+static void AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt);
 static void AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt);
 static void AppendGrantOnSchemaPrivileges(StringInfo buf, GrantStmt *stmt);
 static void AppendGrantOnSchemaSchemas(StringInfo buf, GrantStmt *stmt);
 static void AppendGrantOnSchemaGrantees(StringInfo buf, GrantStmt *stmt);
 static void AppendAlterSchemaRenameStmt(StringInfo buf, RenameStmt *stmt);
 
+char *
+DeparseCreateSchemaStmt(Node *node)
+{
+	CreateSchemaStmt *stmt = castNode(CreateSchemaStmt, node);
+
+	StringInfoData str = { 0 };
+	initStringInfo(&str);
+
+	AppendCreateSchemaStmt(&str, stmt);
+
+	return str.data;
+}
+
+
+char *
+DeparseDropSchemaStmt(Node *node)
+{
+	DropStmt *stmt = castNode(DropStmt, node);
+
+	StringInfoData str = { 0 };
+	initStringInfo(&str);
+
+	AppendDropSchemaStmt(&str, stmt);
+
+	return str.data;
+}
+
+
 char *
 DeparseGrantOnSchemaStmt(Node *node)
 {
@@ -53,6 +84,70 @@ DeparseAlterSchemaRenameStmt(Node *node)
 }
 
 
+static void
+AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt)
+{
+	if (stmt->schemaElts != NIL)
+	{
+		elog(ERROR, "schema creation is not supported with other create commands");
+	}
+
+	if (stmt->schemaname == NULL)
+	{
+		elog(ERROR, "schema name should be specified");
+	}
+
+	appendStringInfoString(buf, "CREATE SCHEMA ");
+
+	if (stmt->if_not_exists)
+	{
+		appendStringInfoString(buf, "IF NOT EXISTS ");
+	}
+
+	appendStringInfo(buf, "%s ", quote_identifier(stmt->schemaname));
+
+	if (stmt->authrole != NULL)
+	{
+		appendStringInfo(buf, "AUTHORIZATION %s", RoleSpecString(stmt->authrole, true));
+	}
+}
+
+
+static void
+AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt)
+{
+	Assert(stmt->removeType == OBJECT_SCHEMA);
+
+	appendStringInfoString(buf, "DROP SCHEMA ");
+
+	if (stmt->missing_ok)
+	{
+		appendStringInfoString(buf, "IF EXISTS ");
+	}
+
+	Value *schemaValue = NULL;
+	foreach_ptr(schemaValue, stmt->objects)
+	{
+		const char *schemaString = quote_identifier(strVal(schemaValue));
+		appendStringInfo(buf, "%s", schemaString);
+
+		if (schemaValue != llast(stmt->objects))
+		{
+			appendStringInfoString(buf, ", ");
+		}
+	}
+
+	if (stmt->behavior == DROP_CASCADE)
+	{
+		appendStringInfoString(buf, " CASCADE");
+	}
+	else if (stmt->behavior == DROP_RESTRICT)
+	{
+		appendStringInfoString(buf, " RESTRICT");
+	}
+}
+
+
 static void
 AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt)
 {
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h
index 41141ee8a..31601dc2a 100644
--- a/src/include/distributed/commands.h
+++ b/src/include/distributed/commands.h
@@ -366,6 +366,8 @@ extern ObjectAddress AlterRoleSetStmtObjectAddress(Node *node,
 extern List *
GenerateCreateOrAlterRoleCommand(Oid roleOid); /* schema.c - forward declarations */ +extern List * PreprocessCreateSchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); extern List * PreprocessDropSchemaStmt(Node *dropSchemaStatement, const char *queryString, ProcessUtilityContext processUtilityContext); @@ -375,6 +377,7 @@ extern List * PreprocessGrantOnSchemaStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); extern List * PreprocessAlterSchemaRenameStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern ObjectAddress CreateSchemaStmtObjectAddress(Node *node, bool missing_ok); extern ObjectAddress AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok); /* sequence.c - forward declarations */ diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 8934323f0..b91fba87e 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -60,6 +60,8 @@ extern char * DeparseAlterTableStmt(Node *node); extern void QualifyAlterTableSchemaStmt(Node *stmt); /* forward declarations for deparse_schema_stmts.c */ +extern char * DeparseCreateSchemaStmt(Node *node); +extern char * DeparseDropSchemaStmt(Node *node); extern char * DeparseGrantOnSchemaStmt(Node *stmt); extern char * DeparseAlterSchemaRenameStmt(Node *stmt); diff --git a/src/test/regress/expected/citus_local_table_triggers.out b/src/test/regress/expected/citus_local_table_triggers.out index 1a269c649..007986fab 100644 --- a/src/test/regress/expected/citus_local_table_triggers.out +++ b/src/test/regress/expected/citus_local_table_triggers.out @@ -193,8 +193,8 @@ FOR EACH STATEMENT EXECUTE FUNCTION dummy_function(); NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'CREATE TRIGGER "trigger\\''name" BEFORE INSERT ON "interesting!schema"."citus_local!_table" FOR EACH STATEMENT EXECUTE FUNCTION dummy_function();') +CREATE EXTENSION seg; BEGIN; - CREATE EXTENSION seg; -- ALTER TRIGGER DEPENDS ON ALTER TRIGGER "trigger\'name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg; NOTICE: executing the command locally: SELECT worker_apply_shard_ddl_command (1507008, 'interesting!schema', E'ALTER TRIGGER "trigger\\''name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg;') @@ -578,5 +578,5 @@ NOTICE: executing the command locally: SELECT val FROM citus_local_table_trigge ROLLBACK; -- cleanup at exit +SET client_min_messages TO ERROR; DROP SCHEMA citus_local_table_triggers, "interesting!schema" CASCADE; -NOTICE: drop cascades to 22 other objects diff --git a/src/test/regress/expected/columnar_truncate.out b/src/test/regress/expected/columnar_truncate.out index 5914cfb0f..fe5169535 100644 --- a/src/test/regress/expected/columnar_truncate.out +++ b/src/test/regress/expected/columnar_truncate.out @@ -265,6 +265,13 @@ set columnar.compression = 'pglz'; INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100); set columnar.compression to default; -- create a user that can not truncate +SELECT run_command_on_workers($$CREATE USER truncate_user;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE ROLE") + (localhost,57638,t,"CREATE ROLE") +(2 rows) + CREATE USER truncate_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to 
manually create all necessary users and roles. diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index 7ca02b9b1..f2413d0e5 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -155,16 +155,6 @@ ORDER BY 1,2,3; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA collation_tests CASCADE; DROP SCHEMA collation_tests2 CASCADE; --- This is hacky, but we should clean-up the resources as below -\c - - - :worker_1_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; -\c - - - :worker_2_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; -\c - - - :master_port DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); run_command_on_workers diff --git a/src/test/regress/expected/distributed_collations_conflict.out b/src/test/regress/expected/distributed_collations_conflict.out index 821521457..818de2697 100644 --- a/src/test/regress/expected/distributed_collations_conflict.out +++ b/src/test/regress/expected/distributed_collations_conflict.out @@ -1,11 +1,4 @@ CREATE SCHEMA collation_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - \c - - - :worker_1_port SET search_path TO collation_conflict; SET citus.enable_metadata_sync TO off; diff --git a/src/test/regress/expected/distributed_functions.out b/src/test/regress/expected/distributed_functions.out index 5068cf709..614f19288 100644 --- a/src/test/regress/expected/distributed_functions.out +++ b/src/test/regress/expected/distributed_functions.out @@ -1063,16 +1063,9 @@ SELECT stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isa \c - - - :worker_1_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; -SET search_path TO function_tests, function_tests2; \c - - - :worker_2_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; \c - - - :master_port SET client_min_messages TO ERROR; DROP USER functionuser; diff --git a/src/test/regress/expected/distributed_functions_conflict.out b/src/test/regress/expected/distributed_functions_conflict.out index 537da8c4e..b961fb955 100644 --- a/src/test/regress/expected/distributed_functions_conflict.out +++ b/src/test/regress/expected/distributed_functions_conflict.out @@ -1,13 +1,6 @@ -- This is designed to test worker_create_or_replace_object in PG11 with aggregates -- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur CREATE SCHEMA proc_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - \c - - - :worker_1_port SET 
search_path TO proc_conflict; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ diff --git a/src/test/regress/expected/distributed_procedure.out b/src/test/regress/expected/distributed_procedure.out index 9331badff..3a14ed048 100644 --- a/src/test/regress/expected/distributed_procedure.out +++ b/src/test/regress/expected/distributed_procedure.out @@ -173,21 +173,7 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello'); SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP SCHEMA procedure_tests2 CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP USER procedureuser; SELECT 1 FROM run_command_on_workers($$DROP USER procedureuser;$$); ?column? diff --git a/src/test/regress/expected/distributed_types.out b/src/test/regress/expected/distributed_types.out index 6d22ee2a2..dcf8dd8b2 100644 --- a/src/test/regress/expected/distributed_types.out +++ b/src/test/regress/expected/distributed_types.out @@ -456,21 +456,7 @@ SELECT * FROM field_indirection_test_2 ORDER BY 1,2,3; -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP SCHEMA type_tests2 CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP USER typeuser; SELECT run_command_on_workers($$DROP USER typeuser;$$); run_command_on_workers diff --git a/src/test/regress/expected/distributed_types_conflict.out b/src/test/regress/expected/distributed_types_conflict.out index c106f64c9..17d259e76 100644 --- a/src/test/regress/expected/distributed_types_conflict.out +++ b/src/test/regress/expected/distributed_types_conflict.out @@ -1,12 +1,5 @@ SET citus.next_shard_id TO 20020000; CREATE SCHEMA type_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - -- create a type on a worker that should not cause data loss once overwritten with a type -- from the coordinator \c - - :public_worker_1_host :worker_1_port diff --git a/src/test/regress/expected/distributed_types_xact_add_enum_value.out b/src/test/regress/expected/distributed_types_xact_add_enum_value.out index fbfee78ac..abc2c88d0 100644 --- a/src/test/regress/expected/distributed_types_xact_add_enum_value.out +++ b/src/test/regress/expected/distributed_types_xact_add_enum_value.out @@ -64,10 +64,3 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY enumso -- clear objects SET 
client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out index b77e6f80b..edc535020 100644 --- a/src/test/regress/expected/drop_partitioned_table.out +++ b/src/test/regress/expected/drop_partitioned_table.out @@ -47,7 +47,6 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1') AND c.relkind IN ('r','p') ORDER BY 1, 2; \c - - - :worker_1_port -CREATE SCHEMA drop_partitioned_table; SET search_path = drop_partitioned_table; CREATE VIEW tables_info AS SELECT n.nspname as "Schema", @@ -395,11 +394,4 @@ NOTICE: issuing ROLLBACK NOTICE: issuing ROLLBACK DROP SCHEMA drop_partitioned_table CASCADE; NOTICE: drop cascades to 3 other objects -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - SET search_path TO public; diff --git a/src/test/regress/expected/failure_add_disable_node.out b/src/test/regress/expected/failure_add_disable_node.out index ef78e451b..ca1c8f838 100644 --- a/src/test/regress/expected/failure_add_disable_node.out +++ b/src/test/regress/expected/failure_add_disable_node.out @@ -253,11 +253,3 @@ ORDER BY placementid; RESET SEARCH_PATH; DROP SCHEMA add_remove_node CASCADE; NOTICE: drop cascades to table add_remove_node.user_table -SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE') -ORDER BY nodeport; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 9060 | t | DROP SCHEMA - localhost | 57637 | t | DROP SCHEMA -(2 rows) - diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 13f6cdffa..3a88e0192 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -4,7 +4,9 @@ -- We have to keep two copies of this failure test -- because if the shards are created via the executor -- cancellations are processed, otherwise they are not +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA create_distributed_table_non_empty_failure; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'create_distributed_table_non_empty_failure'; SET citus.next_shard_id TO 11000000; SELECT citus.mitmproxy('conn.allow()'); @@ -100,13 +102,6 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata (localhost,57637,t,1) (2 rows) -SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,9060,t,"DROP SCHEMA") - (localhost,57637,t,"DROP SCHEMA") -(2 rows) - -- this triggers a schema creation which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git 
a/src/test/regress/expected/failure_create_reference_table.out b/src/test/regress/expected/failure_create_reference_table.out index bb9ce32f5..432290c4b 100644 --- a/src/test/regress/expected/failure_create_reference_table.out +++ b/src/test/regress/expected/failure_create_reference_table.out @@ -193,7 +193,9 @@ SELECT citus.mitmproxy('conn.allow()'); DROP TABLE ref_table; DROP SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO ON; CREATE TABLE ref_table(id int); INSERT INTO ref_table VALUES(1),(2),(3); -- Test in transaction diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 14b3daa66..4a575ed19 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -1,7 +1,9 @@ -- -- failure_create_table adds failure tests for creating table without data. -- +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_create_table; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'failure_create_table'; SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/fkeys_between_local_ref.out b/src/test/regress/expected/fkeys_between_local_ref.out index b142bd4d3..3df007cb3 100644 --- a/src/test/regress/expected/fkeys_between_local_ref.out +++ b/src/test/regress/expected/fkeys_between_local_ref.out @@ -332,6 +332,7 @@ BEGIN; ERROR: insert or update on table "local_table_5_1518073" violates foreign key constraint "local_table_5_col_1_fkey1_1518073" ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA another_schema_fkeys_between_local_ref; CREATE TABLE another_schema_fkeys_between_local_ref.local_table_6 (col_1 INT PRIMARY KEY); -- first convert local tables to citus local tables in graph @@ -376,6 +377,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE local_table_6 (col_1 INT PRIMARY KEY); -- first convert local tables to citus local tables in graph ALTER TABLE local_table_2 ADD CONSTRAINT fkey_11 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1) ON DELETE CASCADE; diff --git a/src/test/regress/expected/grant_on_schema_propagation.out b/src/test/regress/expected/grant_on_schema_propagation.out index 96816714d..97c02f074 100644 --- a/src/test/regress/expected/grant_on_schema_propagation.out +++ b/src/test/regress/expected/grant_on_schema_propagation.out @@ -6,7 +6,9 @@ CREATE SCHEMA dist_schema; CREATE TABLE dist_schema.dist_table (id int); CREATE SCHEMA another_dist_schema; CREATE TABLE another_dist_schema.dist_table (id int); +SET citus.enable_ddl_propagation TO off; CREATE SCHEMA non_dist_schema; +SET citus.enable_ddl_propagation TO on; -- create roles on all nodes SELECT run_command_on_coordinator_and_workers('CREATE USER role_1'); NOTICE: not propagating CREATE ROLE/USER commands to worker nodes @@ -193,24 +195,9 @@ SET citus.enable_alter_role_propagation TO ON; ALTER ROLE role_1 NOSUPERUSER; SET citus.enable_alter_role_propagation TO OFF; DROP TABLE dist_schema.dist_table, another_dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA another_dist_schema'); - run_command_on_coordinator_and_workers 
---------------------------------------------------------------------- - -(1 row) - -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA non_dist_schema'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP SCHEMA dist_schema; +DROP SCHEMA another_dist_schema; +DROP SCHEMA non_dist_schema; -- test if the grantors are propagated correctly -- first remove one of the worker nodes SET citus.shard_replication_factor TO 1; @@ -319,12 +306,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER \c - - - :master_port DROP TABLE grantor_schema.grantor_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grantor_schema CASCADE'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP SCHEMA grantor_schema CASCADE; -- test distributing the schema with another user CREATE SCHEMA dist_schema; GRANT ALL ON SCHEMA dist_schema TO role_1 WITH GRANT OPTION; @@ -352,12 +334,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY \c - - - :master_port DROP TABLE dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE'); - run_command_on_coordinator_and_workers ---------------------------------------------------------------------- - -(1 row) - +DROP SCHEMA dist_schema CASCADE; -- test grants on public schema -- first remove one of the worker nodes SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index d036468b7..d8c1a5a74 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -1,6 +1,6 @@ Parsed test spec with 3 sessions -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -164,22 +164,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -187,7 +171,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-public-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -357,22 +341,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -380,7 +348,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-public-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -550,22 +518,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -573,7 +525,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -739,22 +691,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -762,7 +698,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s1-add-worker s2-create-schema s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -934,22 +870,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -957,7 +877,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-table s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -1129,22 +1049,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1152,7 +1056,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s1-add-worker s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s2-create-schema s1-begin s2-begin s1-add-worker s2-create-table s1-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -1324,22 +1228,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1347,7 +1235,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-add-worker s2-create-schema s2-begin s3-begin s3-use-schema s2-create-table s3-create-table s2-commit s3-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -1534,22 +1422,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1557,7 +1429,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s3-begin s1-add-worker s2-create-schema s3-create-schema2 s2-create-table s3-create-table s1-commit s3-commit s2-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -1753,22 +1625,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1776,7 +1632,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-create-type s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -1932,22 +1788,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -1955,7 +1795,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-create-type s1-add-worker s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2110,22 +1950,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2133,7 +1957,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-begin s2-create-schema s2-create-type s2-create-table-with-type s1-add-worker s2-commit s1-commit s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -2309,22 +2133,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2332,7 +2140,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s1-add-worker s2-public-schema s2-distribute-function s1-commit s2-begin s2-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2508,22 +2316,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2531,7 +2323,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s1-begin s2-public-schema s2-distribute-function s2-begin s2-commit s3-wait-for-metadata-sync s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas ?column? --------------------------------------------------------------------- 1 @@ -2714,22 +2506,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- @@ -2737,7 +2513,7 @@ master_remove_node (2 rows) -starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas s3-drop-worker-schemas +starting permutation: s1-print-distributed-objects s2-begin s2-create-schema s2-distribute-function s2-commit s3-wait-for-metadata-sync s1-begin s1-add-worker s1-commit s3-wait-for-metadata-sync s2-print-distributed-objects s3-drop-coordinator-schemas ?column? 
--------------------------------------------------------------------- 1 @@ -2922,22 +2698,6 @@ step s3-drop-coordinator-schemas: DROP SCHEMA IF EXISTS myschema CASCADE; DROP SCHEMA IF EXISTS myschema2 CASCADE; -step s3-drop-worker-schemas: - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"DROP SCHEMA") -(localhost,57638,t,"DROP SCHEMA") -(2 rows) - master_remove_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_extension_commands.out b/src/test/regress/expected/isolation_extension_commands.out index 4c5bfe3f0..711c854c7 100644 --- a/src/test/regress/expected/isolation_extension_commands.out +++ b/src/test/regress/expected/isolation_extension_commands.out @@ -28,7 +28,7 @@ step s1-print: count --------------------------------------------------------------------- - 4 + 7 (1 row) extname|extversion|nspname @@ -89,7 +89,7 @@ step s1-print: count --------------------------------------------------------------------- - 4 + 7 (1 row) extname|extversion|nspname @@ -158,7 +158,7 @@ step s1-print: count --------------------------------------------------------------------- - 3 + 6 (1 row) extname|extversion|nspname @@ -214,7 +214,7 @@ step s1-print: count --------------------------------------------------------------------- - 5 + 7 (1 row) extname|extversion|nspname @@ -275,7 +275,7 @@ step s1-print: count --------------------------------------------------------------------- - 4 + 6 (1 row) extname|extversion|nspname diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 26d300e84..24aa7302f 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -5,6 +5,13 @@ SET citus.next_placement_id TO 1500000; -- supress notice messages to make sure that the tests -- do not diverge with enterprise SET client_min_messages TO WARNING; +SELECT run_command_on_workers($$CREATE ROLE metadata_sync_helper_role WITH LOGIN;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE ROLE") + (localhost,57638,t,"CREATE ROLE") +(2 rows) + CREATE ROLE metadata_sync_helper_role WITH LOGIN; GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; RESET client_min_messages; @@ -40,26 +47,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; --- connect back as super user, and then connect to the worker --- with the superuser to make sure we can ingest metadata with --- a regular user under the certain conditions \c - postgres - --- we don't need the table/schema anymore -SET client_min_messages TO ERROR; -DROP SCHEMA metadata_sync_helpers CASCADE; -DROP ROLE metadata_sync_helper_role; \c - - - :worker_1_port -CREATE SCHEMA metadata_sync_helpers; SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int, col_2 int); --- supress notice messages to 
make sure that the tests --- do not diverge with enterprise -SET client_min_messages TO WARNING; -SET citus.enable_ddl_propagation TO OFF; -CREATE ROLE metadata_sync_helper_role WITH LOGIN; -GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; -RESET client_min_messages; -RESET citus.enable_ddl_propagation; -- connect back with the regular user \c - metadata_sync_helper_role - :worker_1_port SET search_path TO metadata_sync_helpers; @@ -596,7 +587,7 @@ ROLLBACK; -- Now let's check valid pg_dist_object updates -- check with non-existing object type BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id --------------------------------------------------------------------- @@ -640,7 +631,7 @@ ERROR: colocationId must be a positive number ROLLBACK; -- check with non-existing object BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id --------------------------------------------------------------------- @@ -656,7 +647,7 @@ ROLLBACK; -- since citus_internal_add_object_metadata is strict function returns NULL -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; - SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); + SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id --------------------------------------------------------------------- @@ -1364,4 +1355,7 @@ SET client_min_messages TO ERROR; SET citus.enable_ddl_propagation TO OFF; DROP OWNED BY metadata_sync_helper_role; DROP ROLE metadata_sync_helper_role; +\c - - - :master_port +-- cleanup +SET client_min_messages TO ERROR; DROP SCHEMA metadata_sync_helpers CASCADE; diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index cdf002e8e..b85078258 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -520,19 +520,11 @@ CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE (localhost,57638,t,"DROP FUNCTION") (2 rows) +set citus.enable_ddl_propagation to on; -- create schema with weird names CREATE SCHEMA "CiTuS.TeeN"; CREATE SCHEMA "CiTUS.TEEN2"; -SELECT run_command_on_workers($$ - CREATE SCHEMA IF NOT EXISTS "CiTuS.TeeN"; - CREATE SCHEMA IF NOT EXISTS "CiTUS.TEEN2"; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - +set citus.enable_ddl_propagation to off; -- create table with weird names CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() RETURNS TEXT AS $$ SELECT 'test function without params' $$ @@ -737,15 +729,4 @@ SET client_min_messages TO WARNING; -- suppress cascading objects dropping DROP SCHEMA "CiTuS.TeeN" CASCADE; DROP SCHEMA "CiTUS.TEEN2" CASCADE; DROP SCHEMA function_tests CASCADE; -SELECT run_command_on_workers($$ - DROP SCHEMA "CiTuS.TeeN" CASCADE; - DROP SCHEMA "CiTUS.TEEN2" CASCADE; - DROP SCHEMA function_tests CASCADE; -$$); - run_command_on_workers 
---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - DROP ROLE function_role; diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index 98e5437a6..ab0e62964 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -741,10 +741,3 @@ SELECT citus_remove_node('localhost', :master_port); (1 row) -SELECT run_command_on_workers($$ DROP SCHEMA IF EXISTS fix_idx_names CASCADE $$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - diff --git a/src/test/regress/expected/multi_foreign_key_relation_graph.out b/src/test/regress/expected/multi_foreign_key_relation_graph.out index 41de227c7..735cb0596 100644 --- a/src/test/regress/expected/multi_foreign_key_relation_graph.out +++ b/src/test/regress/expected/multi_foreign_key_relation_graph.out @@ -440,6 +440,7 @@ drop cascades to constraint fkey_1 on table test_4 ROLLBACK; -- Test schemas BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; @@ -526,6 +527,7 @@ drop cascades to constraint test_8_id_fkey on table test_8 ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 01417a974..2c7be4c90 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -75,11 +75,6 @@ GRANT ALL ON TABLE test_1420000 TO full_access; GRANT SELECT ON TABLE test_1420000 TO read_access; GRANT ALL ON TABLE test_1420002 TO full_access; GRANT SELECT ON TABLE test_1420002 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; \c - - - :worker_2_port CREATE USER full_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes @@ -102,11 +97,6 @@ GRANT ALL ON TABLE test_1420001 TO full_access; GRANT SELECT ON TABLE test_1420001 TO read_access; GRANT ALL ON TABLE test_1420003 TO full_access; GRANT SELECT ON TABLE test_1420003 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; \c - - - :master_port SET citus.shard_replication_factor TO 1; -- create prepare tests diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index 92c43b363..ef67a626e 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ 
b/src/test/regress/expected/multi_mx_create_table.out @@ -63,9 +63,6 @@ CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; -CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDFs in worker node CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS @@ -101,9 +98,6 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- now create required stuff in the worker 2 \c - - - :worker_2_port --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; -CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDF CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS diff --git a/src/test/regress/expected/multi_mx_ddl.out b/src/test/regress/expected/multi_mx_ddl.out index 14318c3d8..e6f3f6e0f 100644 --- a/src/test/regress/expected/multi_mx_ddl.out +++ b/src/test/regress/expected/multi_mx_ddl.out @@ -304,13 +304,6 @@ SELECT * FROM seg_test; \c - - - :master_port CREATE SCHEMA ext_owned_tables; -SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - (localhost,57638,t,"CREATE SCHEMA") -(2 rows) - SET search_path TO ext_owned_tables; CREATE sequence my_seq_ext_1; SELECT run_command_on_workers($$CREATE sequence ext_owned_tables.my_seq_ext_1;$$); diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index bc41b5c24..71dfde3fd 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -444,11 +444,9 @@ ALTER TABLE "CiTuS.TeAeN"."TeeNTabLE.1!?!" ADD COLUMN new_col INT; -- set the search_path to not existing schema SET search_path TO not_existing_schema; ALTER TABLE "CiTuS.TeAeN"."TeeNTabLE.1!?!" DROP COLUMN new_col; +SET client_min_messages TO ERROR; DROP SCHEMA mx_ddl_schema_1, mx_ddl_schema_2, "CiTuS.TeAeN" CASCADE; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to table "CiTuS.TeAeN"."TeeNTabLE.1!?!" 
-drop cascades to table mx_ddl_schema_2.table_2 -drop cascades to table mx_ddl_schema_1.table_1 +RESET client_min_messages; -- test if ALTER TABLE SET SCHEMA sets the original table in the worker SET search_path TO public; CREATE SCHEMA mx_old_schema; @@ -462,11 +460,13 @@ SELECT create_distributed_table('mx_old_schema.table_set_schema', 'id'); CREATE SCHEMA mx_new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM citus.pg_dist_object - WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema'); + WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema') + ORDER BY "Distributed Schemas"; Distributed Schemas --------------------------------------------------------------------- mx_old_schema -(1 row) + mx_new_schema +(2 rows) \c - - - :worker_1_port SELECT table_schema AS "Table's Schema" FROM information_schema.tables WHERE table_name='table_set_schema'; diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index b23c609ec..ee4879edf 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -941,7 +941,8 @@ SELECT objid::oid::regnamespace as "Distributed Schemas" Distributed Schemas --------------------------------------------------------------------- old_schema -(1 row) + new_schema +(2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" @@ -999,7 +1000,8 @@ SELECT objid::oid::regnamespace as "Distributed Schemas" WHERE objid='new_schema'::regnamespace::oid; Distributed Schemas --------------------------------------------------------------------- -(0 rows) + new_schema +(1 row) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema" @@ -1065,7 +1067,8 @@ SELECT objid::oid::regnamespace as "Distributed Schemas" Distributed Schemas --------------------------------------------------------------------- old_schema -(1 row) + new_schema +(2 rows) \c - - - :worker_1_port SELECT table_schema AS "Shards' Schema", COUNT(*) AS "Counts" @@ -1426,7 +1429,7 @@ BEGIN; ALTER SCHEMA bar RENAME TO foo; ERROR: cannot create or modify schema because there was a parallel operation on a distributed table in the transaction -DETAIL: When creating or altering a schema, Citus needs to perform all operations over a single connection per node to ensure consistency. +DETAIL: When creating, altering, or dropping a schema, Citus needs to perform all operations over a single connection per node to ensure consistency. 
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" ROLLBACK; BEGIN; @@ -1441,10 +1444,18 @@ BEGIN; ROLLBACK; -- Clean up the created schema SET client_min_messages TO WARNING; -DROP SCHEMA run_test_schema CASCADE; -DROP SCHEMA test_schema_support_join_1 CASCADE; -DROP SCHEMA test_schema_support_join_2 CASCADE; -DROP SCHEMA "Citus'Teen123" CASCADE; -DROP SCHEMA "CiTUS.TEEN2" CASCADE; -DROP SCHEMA bar CASCADE; -DROP SCHEMA test_schema_support CASCADE; +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); + pg_identify_object_as_address +--------------------------------------------------------------------- + (schema,{run_test_schema},{}) +(1 row) + +DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE; +-- verify that the dropped schema is removed from worker's pg_dist_object +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + diff --git a/src/test/regress/expected/multi_sequence_default.out b/src/test/regress/expected/multi_sequence_default.out index 7f3c23673..27f083e92 100644 --- a/src/test/regress/expected/multi_sequence_default.out +++ b/src/test/regress/expected/multi_sequence_default.out @@ -370,70 +370,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP SEQUENCE sequence_default_0.sequence_3 CASCADE; NOTICE: drop cascades to default value for column y of table seq_test_3 DROP SCHEMA sequence_default_0; --- DROP SCHEMA problem: expected since we don't propagate DROP SCHEMA -CREATE TABLE seq_test_5 (x int, y int); -SELECT create_distributed_table('seq_test_5','x'); - create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -CREATE SCHEMA sequence_default_1; -CREATE SEQUENCE sequence_default_1.seq_5; -ALTER TABLE seq_test_5 ADD COLUMN a bigint DEFAULT nextval('sequence_default_1.seq_5'); -DROP SCHEMA sequence_default_1 CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to sequence sequence_default_1.seq_5 -drop cascades to default value for column a of table seq_test_5 --- sequence is gone from coordinator -INSERT INTO seq_test_5 VALUES (1, 2) RETURNING *; - x | y | a ---------------------------------------------------------------------- - 1 | 2 | -(1 row) - --- but is still present on worker -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; - x | y | a ---------------------------------------------------------------------- - 1 | 2 | 281474976710657 -(1 row) - -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - --- apply workaround -SELECT run_command_on_workers('DROP SCHEMA sequence_default_1 CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - --- now the 
sequence is gone from the worker as well -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; - x | y | a ---------------------------------------------------------------------- - 1 | 2 | -(1 row) - -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - start_metadata_sync_to_node ---------------------------------------------------------------------- - -(1 row) - -- check some more complex cases CREATE SEQUENCE seq_6; CREATE TABLE seq_test_6 (x int, t timestamptz DEFAULT now(), s int DEFAULT nextval('seq_6'), m int) PARTITION BY RANGE (t); @@ -569,14 +505,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); (1 row) -DROP SCHEMA sequence_default_8; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default_8 CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - +DROP SCHEMA sequence_default_8 CASCADE; -- cannot use more than one sequence in a column default CREATE SEQUENCE seq_9; CREATE SEQUENCE seq_10; @@ -914,17 +843,6 @@ ERROR: nextval: reached maximum value of sequence "seq_14" (32767) -- table with default column is added CREATE SCHEMA test_schema_for_sequence_default_propagation; CREATE SEQUENCE test_schema_for_sequence_default_propagation.seq_10; --- Both should return 0 rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); - pg_identify_object_as_address ---------------------------------------------------------------------- -(0 rows) - -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); - pg_identify_object_as_address ---------------------------------------------------------------------- -(0 rows) - -- Create distributed table with default column to propagate dependencies CREATE TABLE test_seq_dist(a int, x BIGINT DEFAULT nextval('test_schema_for_sequence_default_propagation.seq_10')); SELECT create_distributed_table('test_seq_dist', 'a'); @@ -998,13 +916,6 @@ DROP TABLE test_seq_dist; DROP TABLE sequence_default.seq_test_7_par; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA sequence_default CASCADE; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP SCHEMA") - (localhost,57638,t,"DROP SCHEMA") -(2 rows) - SELECT master_remove_node('localhost', :master_port); master_remove_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/mx_regular_user.out b/src/test/regress/expected/mx_regular_user.out index 2538929cc..9b60132e0 100644 --- a/src/test/regress/expected/mx_regular_user.out +++ b/src/test/regress/expected/mx_regular_user.out @@ -174,16 +174,6 @@ ERROR: unsupported lockmode 8 \c - postgres - :master_port; SET client_min_messages TO ERROR; DROP SCHEMA "Mx Super User" CASCADE; -\c - postgres - :worker_1_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; 
-\c - postgres - :worker_2_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; -- now connect with that user \c - regular_mx_user - :master_port SET search_path TO "Mx Regular User"; diff --git a/src/test/regress/expected/non_super_user_object_metadata.out b/src/test/regress/expected/non_super_user_object_metadata.out index 0ff03c0b8..0923d873a 100644 --- a/src/test/regress/expected/non_super_user_object_metadata.out +++ b/src/test/regress/expected/non_super_user_object_metadata.out @@ -412,7 +412,7 @@ SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM localhost | 57638 | t | 1 (2 rows) --- Show that dropping schema doesn't affect the worker node +-- Show that the schema is dropped on worker node as well DROP SCHEMA local_schema CASCADE; NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to function plpgsql_dist_function(text) @@ -426,8 +426,8 @@ SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as SELECT * FROM run_command_on_workers($$SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';$$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | (schema,{local_schema},{}) - localhost | 57638 | t | (schema,{local_schema},{}) + localhost | 57637 | t | + localhost | 57638 | t | (2 rows) -- Show that extension and dependent sequence also created and marked as distributed diff --git a/src/test/regress/expected/object_propagation_debug.out b/src/test/regress/expected/object_propagation_debug.out index 248ec4fd4..020fa2629 100644 --- a/src/test/regress/expected/object_propagation_debug.out +++ b/src/test/regress/expected/object_propagation_debug.out @@ -81,22 +81,5 @@ ON TRUE --------------------------------------------------------------------- (0 rows) --- find non-distributed dependencies of the local table test2 -CREATE SCHEMA objectprop2; -create table objectprop2.test2(a int, b t1); -SET search_path TO objectprop2; -SELECT - pg_identify_object(t.classid, t.objid, t.objsubid) -FROM - (SELECT * FROM pg_get_object_address('table', '{test2}', '{}')) as addr -JOIN LATERAL - "object prop".citus_get_dependencies_for_object(addr.classid, addr.objid, addr.objsubid) as t(classid oid, objid oid, objsubid int) -ON TRUE - ORDER BY 1; - pg_identify_object ---------------------------------------------------------------------- - (schema,,objectprop2,objectprop2) -(1 row) - SET client_min_messages TO ERROR; -DROP SCHEMA "object prop", objectprop2 CASCADE; +DROP SCHEMA "object prop" CASCADE; diff --git a/src/test/regress/expected/subquery_prepared_statements.out b/src/test/regress/expected/subquery_prepared_statements.out index 91e5d6642..29e76db03 100644 --- a/src/test/regress/expected/subquery_prepared_statements.out +++ b/src/test/regress/expected/subquery_prepared_statements.out @@ -2,13 +2,6 @@ -- test recursive planning functionality on prepared statements -- =================================================================== CREATE SCHEMA subquery_prepared_statements; -SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;'); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE SCHEMA") - 
(localhost,57638,t,"CREATE SCHEMA") -(2 rows) - SET search_path TO subquery_prepared_statements, public; CREATE TYPE subquery_prepared_statements.xy AS (x int, y int); SET client_min_messages TO DEBUG1; diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index 659000306..fb719a243 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -645,10 +645,6 @@ SELECT create_distributed_table('table_without_sequence', 'a'); CREATE SCHEMA test_schema_for_sequence_propagation; CREATE SEQUENCE test_schema_for_sequence_propagation.seq_10; --- Both should have zero rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass); -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace); - ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_schema_for_sequence_propagation.seq_10'); -- Should be distributed along with the sequence diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index db44baa61..1a071da8a 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -268,7 +268,6 @@ test: multi_foreign_key_relation_graph # and rerun some of the tests. # -------- test: add_coordinator -test: foreign_key_to_reference_table test: replicate_reference_tables_to_coordinator test: citus_local_tables test: mixed_relkind_tests diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 6c2b04289..f1132e9d2 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -1269,17 +1269,6 @@ SELECT create_distributed_table('table_without_sequence', 'a'); CREATE SCHEMA test_schema_for_sequence_propagation; CREATE SEQUENCE test_schema_for_sequence_propagation.seq_10; --- Both should have zero rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass); - pg_identify_object_as_address -------------------------------- -(0 rows) - -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace); - pg_identify_object_as_address -------------------------------- -(0 rows) - ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_schema_for_sequence_propagation.seq_10'); -- Should be distributed along with the sequence SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass); diff --git a/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec index c8aad9a79..f46f80753 100644 --- a/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec +++ b/src/test/regress/spec/isolation_ensure_dependency_activate_node.spec @@ -188,40 +188,34 @@ step "s3-drop-coordinator-schemas" DROP SCHEMA IF EXISTS myschema2 CASCADE; } -step "s3-drop-worker-schemas" -{ - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS 
myschema CASCADE;$$); - SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS myschema2 CASCADE;$$); -} - // schema only tests -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-public-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-public-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-schema" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-table" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // concurrency tests with multi schema distribution -permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" "s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s2-create-schema" "s1-begin" "s2-begin" 
"s1-add-worker" "s2-create-table" "s1-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-add-worker" "s2-create-schema" "s2-begin" "s3-begin" "s3-use-schema" "s2-create-table" "s3-create-table" "s2-commit" "s3-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s3-begin" "s1-add-worker" "s2-create-schema" "s3-create-schema2" "s2-create-table" "s3-create-table" "s1-commit" "s3-commit" "s2-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // type and schema tests -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-create-type" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-create-type" "s1-add-worker" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-begin" "s2-create-schema" "s2-create-type" "s2-create-table-with-type" "s1-add-worker" "s2-commit" "s1-commit" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // distributed function tests // isolation tests are not very simple psql, so trigger NOTIFY reliably for // s3-wait-for-metadata-sync step, we do "s2-begin" followed directly by // "s2-commit", because "COMMIT" syncs the messages -permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" -permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s1-add-worker" "s2-public-schema" "s2-distribute-function" "s1-commit" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" +permutation "s1-print-distributed-objects" "s1-begin" "s2-public-schema" "s2-distribute-function" "s2-begin" "s2-commit" "s3-wait-for-metadata-sync" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" // we cannot run the following operations concurrently // the problem is that NOTIFY event doesn't (reliably) happen before COMMIT // so we have to commit s2 before s1 starts -permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" 
"s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" "s3-drop-worker-schemas" +permutation "s1-print-distributed-objects" "s2-begin" "s2-create-schema" "s2-distribute-function" "s2-commit" "s3-wait-for-metadata-sync" "s1-begin" "s1-add-worker" "s1-commit" "s3-wait-for-metadata-sync" "s2-print-distributed-objects" "s3-drop-coordinator-schemas" diff --git a/src/test/regress/sql/citus_local_table_triggers.sql b/src/test/regress/sql/citus_local_table_triggers.sql index 76b192388..9bb1ee81d 100644 --- a/src/test/regress/sql/citus_local_table_triggers.sql +++ b/src/test/regress/sql/citus_local_table_triggers.sql @@ -160,8 +160,9 @@ CREATE TRIGGER "trigger\'name" BEFORE INSERT ON "interesting!schema"."citus_local!_table" FOR EACH STATEMENT EXECUTE FUNCTION dummy_function(); +CREATE EXTENSION seg; + BEGIN; - CREATE EXTENSION seg; -- ALTER TRIGGER DEPENDS ON ALTER TRIGGER "trigger\'name" ON "interesting!schema"."citus_local!_table" DEPENDS ON EXTENSION seg; @@ -384,4 +385,5 @@ BEGIN; ROLLBACK; -- cleanup at exit +SET client_min_messages TO ERROR; DROP SCHEMA citus_local_table_triggers, "interesting!schema" CASCADE; diff --git a/src/test/regress/sql/columnar_truncate.sql b/src/test/regress/sql/columnar_truncate.sql index f9532c00d..9cdc44d55 100644 --- a/src/test/regress/sql/columnar_truncate.sql +++ b/src/test/regress/sql/columnar_truncate.sql @@ -128,6 +128,7 @@ set columnar.compression = 'pglz'; INSERT INTO truncate_schema.truncate_tbl SELECT generate_series(1, 100); set columnar.compression to default; -- create a user that can not truncate +SELECT run_command_on_workers($$CREATE USER truncate_user;$$); CREATE USER truncate_user; GRANT USAGE ON SCHEMA truncate_schema TO truncate_user; GRANT SELECT ON TABLE truncate_schema.truncate_tbl TO truncate_user; diff --git a/src/test/regress/sql/distributed_collations.sql b/src/test/regress/sql/distributed_collations.sql index 2bf85448a..8b2bffc7e 100644 --- a/src/test/regress/sql/distributed_collations.sql +++ b/src/test/regress/sql/distributed_collations.sql @@ -91,20 +91,5 @@ ORDER BY 1,2,3; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA collation_tests CASCADE; DROP SCHEMA collation_tests2 CASCADE; - --- This is hacky, but we should clean-up the resources as below - -\c - - - :worker_1_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; - -\c - - - :worker_2_port -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA collation_tests CASCADE; -DROP SCHEMA collation_tests2 CASCADE; - -\c - - - :master_port - DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); diff --git a/src/test/regress/sql/distributed_collations_conflict.sql b/src/test/regress/sql/distributed_collations_conflict.sql index c84e241a4..eceee4dd6 100644 --- a/src/test/regress/sql/distributed_collations_conflict.sql +++ b/src/test/regress/sql/distributed_collations_conflict.sql @@ -1,5 +1,4 @@ CREATE SCHEMA collation_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA collation_conflict;$$); \c - - - :worker_1_port SET search_path TO collation_conflict; diff --git a/src/test/regress/sql/distributed_functions.sql b/src/test/regress/sql/distributed_functions.sql index ce39d50c6..97a97fd9d 100644 --- a/src/test/regress/sql/distributed_functions.sql +++ b/src/test/regress/sql/distributed_functions.sql @@ -670,16 +670,9 @@ SELECT 
stop_metadata_sync_to_node(nodename,nodeport) FROM pg_dist_node WHERE isa \c - - - :worker_1_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; -SET search_path TO function_tests, function_tests2; \c - - - :worker_2_port UPDATE pg_dist_local_group SET groupid = 0; TRUNCATE pg_dist_node; -SET client_min_messages TO error; -- suppress cascading objects dropping -DROP SCHEMA function_tests CASCADE; -DROP SCHEMA function_tests2 CASCADE; \c - - - :master_port SET client_min_messages TO ERROR; diff --git a/src/test/regress/sql/distributed_functions_conflict.sql b/src/test/regress/sql/distributed_functions_conflict.sql index ebbb6c6aa..b4877880f 100644 --- a/src/test/regress/sql/distributed_functions_conflict.sql +++ b/src/test/regress/sql/distributed_functions_conflict.sql @@ -2,7 +2,6 @@ -- Note in PG12 we use CREATE OR REPLACE AGGREGATE, thus the renaming does not occur CREATE SCHEMA proc_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA proc_conflict;$$); \c - - - :worker_1_port SET search_path TO proc_conflict; diff --git a/src/test/regress/sql/distributed_procedure.sql b/src/test/regress/sql/distributed_procedure.sql index f8c2ac7b5..e8819a900 100644 --- a/src/test/regress/sql/distributed_procedure.sql +++ b/src/test/regress/sql/distributed_procedure.sql @@ -79,9 +79,7 @@ SELECT * FROM run_command_on_workers($$CALL procedure_tests.raise_info('hello'); SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA procedure_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests CASCADE;$$); DROP SCHEMA procedure_tests2 CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA procedure_tests2 CASCADE;$$); DROP USER procedureuser; SELECT 1 FROM run_command_on_workers($$DROP USER procedureuser;$$); diff --git a/src/test/regress/sql/distributed_types.sql b/src/test/regress/sql/distributed_types.sql index 5448fb181..a136e2fa8 100644 --- a/src/test/regress/sql/distributed_types.sql +++ b/src/test/regress/sql/distributed_types.sql @@ -284,8 +284,6 @@ SELECT * FROM field_indirection_test_2 ORDER BY 1,2,3; -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA type_tests CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests CASCADE;$$); DROP SCHEMA type_tests2 CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA type_tests2 CASCADE;$$); DROP USER typeuser; SELECT run_command_on_workers($$DROP USER typeuser;$$); diff --git a/src/test/regress/sql/distributed_types_conflict.sql b/src/test/regress/sql/distributed_types_conflict.sql index d5f5da761..4b220b088 100644 --- a/src/test/regress/sql/distributed_types_conflict.sql +++ b/src/test/regress/sql/distributed_types_conflict.sql @@ -1,7 +1,6 @@ SET citus.next_shard_id TO 20020000; CREATE SCHEMA type_conflict; -SELECT run_command_on_workers($$CREATE SCHEMA type_conflict;$$); -- create a type on a worker that should not cause data loss once overwritten with a type -- from the coordinator diff --git a/src/test/regress/sql/distributed_types_xact_add_enum_value.sql b/src/test/regress/sql/distributed_types_xact_add_enum_value.sql index e9020f140..c3eb19beb 100644 --- a/src/test/regress/sql/distributed_types_xact_add_enum_value.sql +++ b/src/test/regress/sql/distributed_types_xact_add_enum_value.sql @@ -32,4 +32,3 @@ SELECT run_command_on_workers($$SELECT string_agg(enumlabel, ',' ORDER BY 
enumso -- clear objects SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA xact_enum_type CASCADE; -SELECT run_command_on_workers($$DROP SCHEMA xact_enum_type CASCADE;$$); diff --git a/src/test/regress/sql/drop_partitioned_table.sql b/src/test/regress/sql/drop_partitioned_table.sql index a3d5e9752..fc6e3ac4d 100644 --- a/src/test/regress/sql/drop_partitioned_table.sql +++ b/src/test/regress/sql/drop_partitioned_table.sql @@ -53,7 +53,6 @@ WHERE n.nspname IN ('drop_partitioned_table', 'schema1') ORDER BY 1, 2; \c - - - :worker_1_port -CREATE SCHEMA drop_partitioned_table; SET search_path = drop_partitioned_table; CREATE VIEW tables_info AS SELECT n.nspname as "Schema", @@ -240,5 +239,4 @@ DROP TABLE parent, child1; ROLLBACK; DROP SCHEMA drop_partitioned_table CASCADE; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS drop_partitioned_table CASCADE'); SET search_path TO public; diff --git a/src/test/regress/sql/failure_add_disable_node.sql b/src/test/regress/sql/failure_add_disable_node.sql index 1aa8f9526..42d6e1adb 100644 --- a/src/test/regress/sql/failure_add_disable_node.sql +++ b/src/test/regress/sql/failure_add_disable_node.sql @@ -124,5 +124,3 @@ ORDER BY placementid; RESET SEARCH_PATH; DROP SCHEMA add_remove_node CASCADE; -SELECT * FROM run_command_on_workers('DROP SCHEMA IF EXISTS add_remove_node CASCADE') -ORDER BY nodeport; diff --git a/src/test/regress/sql/failure_create_distributed_table_non_empty.sql b/src/test/regress/sql/failure_create_distributed_table_non_empty.sql index 29dc7a2d7..946903cd0 100644 --- a/src/test/regress/sql/failure_create_distributed_table_non_empty.sql +++ b/src/test/regress/sql/failure_create_distributed_table_non_empty.sql @@ -6,7 +6,9 @@ -- because if the shards are created via the executor -- cancellations are processed, otherwise they are not +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA create_distributed_table_non_empty_failure; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'create_distributed_table_non_empty_failure'; SET citus.next_shard_id TO 11000000; @@ -44,7 +46,6 @@ SELECT citus.mitmproxy('conn.onQuery(query="^CREATE SCHEMA").cancel(' || pg_bac SELECT create_distributed_table('test_table', 'id'); SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'create_distributed_table_non_empty_failure'$$); -SELECT run_command_on_workers($$DROP SCHEMA IF EXISTS create_distributed_table_non_empty_failure$$); -- this triggers a schema creation which prevents further transactions around dependency propagation SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/sql/failure_create_reference_table.sql b/src/test/regress/sql/failure_create_reference_table.sql index 36fa10a93..21552538b 100644 --- a/src/test/regress/sql/failure_create_reference_table.sql +++ b/src/test/regress/sql/failure_create_reference_table.sql @@ -81,7 +81,9 @@ SET client_min_messages TO NOTICE; SELECT citus.mitmproxy('conn.allow()'); DROP TABLE ref_table; DROP SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_reference_table; +SET citus.enable_ddl_propagation TO ON; CREATE TABLE ref_table(id int); INSERT INTO ref_table VALUES(1),(2),(3); diff --git a/src/test/regress/sql/failure_create_table.sql b/src/test/regress/sql/failure_create_table.sql index a4035b431..968d44bc4 100644 --- 
a/src/test/regress/sql/failure_create_table.sql +++ b/src/test/regress/sql/failure_create_table.sql @@ -2,7 +2,9 @@ -- failure_create_table adds failure tests for creating table without data. -- +SET citus.enable_ddl_propagation TO OFF; CREATE SCHEMA failure_create_table; +SET citus.enable_ddl_propagation TO ON; SET search_path TO 'failure_create_table'; SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/sql/fkeys_between_local_ref.sql b/src/test/regress/sql/fkeys_between_local_ref.sql index 6afce7f3f..a04040474 100644 --- a/src/test/regress/sql/fkeys_between_local_ref.sql +++ b/src/test/regress/sql/fkeys_between_local_ref.sql @@ -247,6 +247,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA another_schema_fkeys_between_local_ref; CREATE TABLE another_schema_fkeys_between_local_ref.local_table_6 (col_1 INT PRIMARY KEY); @@ -276,6 +277,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE TABLE local_table_6 (col_1 INT PRIMARY KEY); -- first convert local tables to citus local tables in graph ALTER TABLE local_table_2 ADD CONSTRAINT fkey_11 FOREIGN KEY (col_1) REFERENCES reference_table_1(col_1) ON DELETE CASCADE; diff --git a/src/test/regress/sql/grant_on_schema_propagation.sql b/src/test/regress/sql/grant_on_schema_propagation.sql index 85a92c2b6..cf8832309 100644 --- a/src/test/regress/sql/grant_on_schema_propagation.sql +++ b/src/test/regress/sql/grant_on_schema_propagation.sql @@ -7,7 +7,9 @@ CREATE SCHEMA dist_schema; CREATE TABLE dist_schema.dist_table (id int); CREATE SCHEMA another_dist_schema; CREATE TABLE another_dist_schema.dist_table (id int); +SET citus.enable_ddl_propagation TO off; CREATE SCHEMA non_dist_schema; +SET citus.enable_ddl_propagation TO on; -- create roles on all nodes SELECT run_command_on_coordinator_and_workers('CREATE USER role_1'); @@ -89,9 +91,9 @@ ALTER ROLE role_1 NOSUPERUSER; SET citus.enable_alter_role_propagation TO OFF; DROP TABLE dist_schema.dist_table, another_dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema'); -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA another_dist_schema'); -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA non_dist_schema'); +DROP SCHEMA dist_schema; +DROP SCHEMA another_dist_schema; +DROP SCHEMA non_dist_schema; -- test if the grantors are propagated correctly -- first remove one of the worker nodes @@ -157,7 +159,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'grantor_schema' ORDER \c - - - :master_port DROP TABLE grantor_schema.grantor_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA grantor_schema CASCADE'); +DROP SCHEMA grantor_schema CASCADE; -- test distributing the schema with another user CREATE SCHEMA dist_schema; @@ -175,7 +177,7 @@ SELECT nspname, nspacl FROM pg_namespace WHERE nspname = 'dist_schema' ORDER BY \c - - - :master_port DROP TABLE dist_schema.dist_table; -SELECT run_command_on_coordinator_and_workers('DROP SCHEMA dist_schema CASCADE'); +DROP SCHEMA dist_schema CASCADE; -- test grants on public schema -- first remove one of the worker nodes diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 8e7f13ce5..1ab302ba2 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -6,6 +6,7 @@ SET citus.next_placement_id TO 1500000; -- supress notice messages to make sure that the tests 
-- do not diverge with enterprise SET client_min_messages TO WARNING; +SELECT run_command_on_workers($$CREATE ROLE metadata_sync_helper_role WITH LOGIN;$$); CREATE ROLE metadata_sync_helper_role WITH LOGIN; GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; RESET client_min_messages; @@ -31,33 +32,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; - --- connect back as super user, and then connect to the worker --- with the superuser to make sure we can ingest metadata with --- a regular user under the certain conditions \c - postgres - - --- we don't need the table/schema anymore -SET client_min_messages TO ERROR; -DROP SCHEMA metadata_sync_helpers CASCADE; -DROP ROLE metadata_sync_helper_role; - \c - - - :worker_1_port -CREATE SCHEMA metadata_sync_helpers; SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int, col_2 int); --- supress notice messages to make sure that the tests --- do not diverge with enterprise -SET client_min_messages TO WARNING; -SET citus.enable_ddl_propagation TO OFF; -CREATE ROLE metadata_sync_helper_role WITH LOGIN; -GRANT ALL ON SCHEMA metadata_sync_helpers TO metadata_sync_helper_role; -RESET client_min_messages; -RESET citus.enable_ddl_propagation; - -- connect back with the regular user \c - metadata_sync_helper_role - :worker_1_port SET search_path TO metadata_sync_helpers; @@ -875,4 +856,8 @@ SET client_min_messages TO ERROR; SET citus.enable_ddl_propagation TO OFF; DROP OWNED BY metadata_sync_helper_role; DROP ROLE metadata_sync_helper_role; + +\c - - - :master_port +-- cleanup +SET client_min_messages TO ERROR; DROP SCHEMA metadata_sync_helpers CASCADE; diff --git a/src/test/regress/sql/multi_deparse_function.sql b/src/test/regress/sql/multi_deparse_function.sql index ba823f669..1574ff22f 100644 --- a/src/test/regress/sql/multi_deparse_function.sql +++ b/src/test/regress/sql/multi_deparse_function.sql @@ -258,14 +258,11 @@ SELECT deparse_and_run_on_workers($cmd$ DROP FUNCTION IF EXISTS missing_func_without_args; $cmd$); +set citus.enable_ddl_propagation to on; -- create schema with weird names CREATE SCHEMA "CiTuS.TeeN"; CREATE SCHEMA "CiTUS.TEEN2"; - -SELECT run_command_on_workers($$ - CREATE SCHEMA IF NOT EXISTS "CiTuS.TeeN"; - CREATE SCHEMA IF NOT EXISTS "CiTUS.TEEN2"; -$$); +set citus.enable_ddl_propagation to off; -- create table with weird names CREATE FUNCTION "CiTuS.TeeN"."TeeNFunCT10N.1!?!"() RETURNS TEXT @@ -387,10 +384,4 @@ DROP SCHEMA "CiTuS.TeeN" CASCADE; DROP SCHEMA "CiTUS.TEEN2" CASCADE; DROP SCHEMA function_tests CASCADE; -SELECT run_command_on_workers($$ - DROP SCHEMA "CiTuS.TeeN" CASCADE; - DROP SCHEMA "CiTUS.TEEN2" CASCADE; - DROP SCHEMA function_tests CASCADE; -$$); - DROP ROLE function_role; diff --git a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql index 8604480a0..3ee453074 100644 --- a/src/test/regress/sql/multi_fix_partition_shard_index_names.sql +++ b/src/test/regress/sql/multi_fix_partition_shard_index_names.sql @@ -342,4 +342,3 @@ ALTER TABLE parent_table DROP CONSTRAINT unique_cst CASCADE; DROP SCHEMA fix_idx_names CASCADE; SELECT citus_remove_node('localhost', :master_port); -SELECT run_command_on_workers($$ DROP SCHEMA IF EXISTS fix_idx_names CASCADE $$); diff --git a/src/test/regress/sql/multi_foreign_key_relation_graph.sql 
b/src/test/regress/sql/multi_foreign_key_relation_graph.sql index 818b5b34f..4adf9bae4 100644 --- a/src/test/regress/sql/multi_foreign_key_relation_graph.sql +++ b/src/test/regress/sql/multi_foreign_key_relation_graph.sql @@ -161,6 +161,7 @@ ROLLBACK; -- Test schemas BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; @@ -192,6 +193,7 @@ BEGIN; ROLLBACK; BEGIN; + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; CREATE SCHEMA fkey_intermediate_schema_1; CREATE SCHEMA fkey_intermediate_schema_2; SET search_path TO fkey_graph, fkey_intermediate_schema_1, fkey_intermediate_schema_2; diff --git a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql index aed7fe20b..d22fae1ec 100644 --- a/src/test/regress/sql/multi_multiuser.sql +++ b/src/test/regress/sql/multi_multiuser.sql @@ -54,12 +54,6 @@ GRANT SELECT ON TABLE test_1420000 TO read_access; GRANT ALL ON TABLE test_1420002 TO full_access; GRANT SELECT ON TABLE test_1420002 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; - \c - - - :worker_2_port CREATE USER full_access; CREATE USER usage_access; @@ -75,12 +69,6 @@ GRANT SELECT ON TABLE test_1420001 TO read_access; GRANT ALL ON TABLE test_1420003 TO full_access; GRANT SELECT ON TABLE test_1420003 TO read_access; -CREATE SCHEMA full_access_user_schema; -REVOKE ALL ON SCHEMA full_access_user_schema FROM PUBLIC; -GRANT USAGE ON SCHEMA full_access_user_schema TO full_access; -GRANT ALL ON SCHEMA full_access_user_schema TO full_access; -GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access; - \c - - - :master_port SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql index be391318a..4aa5a0164 100644 --- a/src/test/regress/sql/multi_mx_create_table.sql +++ b/src/test/regress/sql/multi_mx_create_table.sql @@ -63,10 +63,6 @@ CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; -CREATE SCHEMA citus_mx_test_schema_join_2; - -- create UDFs in worker node CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS @@ -107,11 +103,6 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- now create required stuff in the worker 2 \c - - - :worker_2_port --- create schema to test schema support -CREATE SCHEMA citus_mx_test_schema_join_1; -CREATE SCHEMA citus_mx_test_schema_join_2; - - -- create UDF CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS diff --git a/src/test/regress/sql/multi_mx_ddl.sql b/src/test/regress/sql/multi_mx_ddl.sql index 5d1622d35..d0452c3d2 100644 --- a/src/test/regress/sql/multi_mx_ddl.sql +++ b/src/test/regress/sql/multi_mx_ddl.sql @@ -173,7 +173,6 @@ SELECT * FROM seg_test; \c - - - :master_port CREATE SCHEMA ext_owned_tables; -SELECT run_command_on_workers($$CREATE SCHEMA ext_owned_tables;$$); SET search_path TO ext_owned_tables; diff --git 
a/src/test/regress/sql/multi_mx_schema_support.sql b/src/test/regress/sql/multi_mx_schema_support.sql index c6be56db8..555794f92 100644 --- a/src/test/regress/sql/multi_mx_schema_support.sql +++ b/src/test/regress/sql/multi_mx_schema_support.sql @@ -295,7 +295,9 @@ ALTER TABLE "CiTuS.TeAeN"."TeeNTabLE.1!?!" ADD COLUMN new_col INT; SET search_path TO not_existing_schema; ALTER TABLE "CiTuS.TeAeN"."TeeNTabLE.1!?!" DROP COLUMN new_col; +SET client_min_messages TO ERROR; DROP SCHEMA mx_ddl_schema_1, mx_ddl_schema_2, "CiTuS.TeAeN" CASCADE; +RESET client_min_messages; -- test if ALTER TABLE SET SCHEMA sets the original table in the worker SET search_path TO public; @@ -307,7 +309,8 @@ CREATE SCHEMA mx_new_schema; SELECT objid::oid::regnamespace as "Distributed Schemas" FROM citus.pg_dist_object - WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema'); + WHERE objid::oid::regnamespace IN ('mx_old_schema', 'mx_new_schema') + ORDER BY "Distributed Schemas"; \c - - - :worker_1_port SELECT table_schema AS "Table's Schema" FROM information_schema.tables WHERE table_name='table_set_schema'; SELECT table_schema AS "Shards' Schema" diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index b472f0083..fdb922102 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -1026,10 +1026,10 @@ ROLLBACK; -- Clean up the created schema SET client_min_messages TO WARNING; -DROP SCHEMA run_test_schema CASCADE; -DROP SCHEMA test_schema_support_join_1 CASCADE; -DROP SCHEMA test_schema_support_join_2 CASCADE; -DROP SCHEMA "Citus'Teen123" CASCADE; -DROP SCHEMA "CiTUS.TEEN2" CASCADE; -DROP SCHEMA bar CASCADE; -DROP SCHEMA test_schema_support CASCADE; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); +DROP SCHEMA run_test_schema, test_schema_support_join_1, test_schema_support_join_2, "Citus'Teen123", "CiTUS.TEEN2", bar, test_schema_support CASCADE; +-- verify that the dropped schema is removed from worker's pg_dist_object +SELECT pg_identify_object_as_address(classid, objid, objsubid) FROM citus.pg_dist_object + WHERE classid=2615 and objid IN (select oid from pg_namespace where nspname='run_test_schema'); diff --git a/src/test/regress/sql/multi_sequence_default.sql b/src/test/regress/sql/multi_sequence_default.sql index c8d09738f..8c664f393 100644 --- a/src/test/regress/sql/multi_sequence_default.sql +++ b/src/test/regress/sql/multi_sequence_default.sql @@ -187,34 +187,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP SEQUENCE sequence_default_0.sequence_3 CASCADE; DROP SCHEMA sequence_default_0; - --- DROP SCHEMA problem: expected since we don't propagate DROP SCHEMA -CREATE TABLE seq_test_5 (x int, y int); -SELECT create_distributed_table('seq_test_5','x'); -CREATE SCHEMA sequence_default_1; -CREATE SEQUENCE sequence_default_1.seq_5; -ALTER TABLE seq_test_5 ADD COLUMN a bigint DEFAULT nextval('sequence_default_1.seq_5'); -DROP SCHEMA sequence_default_1 CASCADE; --- sequence is gone from coordinator -INSERT INTO seq_test_5 VALUES (1, 2) RETURNING *; --- but is still present on worker -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', 
:worker_1_port); --- apply workaround -SELECT run_command_on_workers('DROP SCHEMA sequence_default_1 CASCADE'); --- now the sequence is gone from the worker as well -\c - - - :worker_1_port -INSERT INTO sequence_default.seq_test_5 VALUES (1, 2) RETURNING *; -\c - - - :master_port -SET citus.shard_replication_factor TO 1; -SET search_path = sequence_default, public; -SELECT start_metadata_sync_to_node('localhost', :worker_1_port); - - -- check some more complex cases CREATE SEQUENCE seq_6; CREATE TABLE seq_test_6 (x int, t timestamptz DEFAULT now(), s int DEFAULT nextval('seq_6'), m int) PARTITION BY RANGE (t); @@ -287,8 +259,7 @@ ALTER TABLE sequence_default_8.seq_8 SET SCHEMA sequence_default; SET citus.shard_replication_factor TO 1; SET search_path = sequence_default, public; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -DROP SCHEMA sequence_default_8; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default_8 CASCADE'); +DROP SCHEMA sequence_default_8 CASCADE; -- cannot use more than one sequence in a column default @@ -442,10 +413,6 @@ SELECT nextval('seq_14'); CREATE SCHEMA test_schema_for_sequence_default_propagation; CREATE SEQUENCE test_schema_for_sequence_default_propagation.seq_10; --- Both should return 0 rows -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation.seq_10'::regclass); -SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object WHERE objid IN ('test_schema_for_sequence_default_propagation'::regnamespace); - -- Create distributed table with default column to propagate dependencies CREATE TABLE test_seq_dist(a int, x BIGINT DEFAULT nextval('test_schema_for_sequence_default_propagation.seq_10')); SELECT create_distributed_table('test_seq_dist', 'a'); @@ -487,6 +454,5 @@ DROP TABLE test_seq_dist; DROP TABLE sequence_default.seq_test_7_par; SET client_min_messages TO error; -- suppress cascading objects dropping DROP SCHEMA sequence_default CASCADE; -SELECT run_command_on_workers('DROP SCHEMA IF EXISTS sequence_default CASCADE'); SELECT master_remove_node('localhost', :master_port); SET search_path TO public; diff --git a/src/test/regress/sql/mx_regular_user.sql b/src/test/regress/sql/mx_regular_user.sql index 1c52ebc80..2dbd85c28 100644 --- a/src/test/regress/sql/mx_regular_user.sql +++ b/src/test/regress/sql/mx_regular_user.sql @@ -100,18 +100,6 @@ SELECT lock_shard_resources(8, ARRAY[2980000]); SET client_min_messages TO ERROR; DROP SCHEMA "Mx Super User" CASCADE; -\c - postgres - :worker_1_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; - -\c - postgres - :worker_2_port; -SET client_min_messages TO ERROR; -SET citus.enable_ddl_propagation TO OFF; -CREATE SCHEMA "Mx Regular User"; -GRANT ALL ON SCHEMA "Mx Regular User" TO regular_mx_user; - -- now connect with that user \c - regular_mx_user - :master_port SET search_path TO "Mx Regular User"; diff --git a/src/test/regress/sql/non_super_user_object_metadata.sql b/src/test/regress/sql/non_super_user_object_metadata.sql index be965288f..63d16800b 100644 --- a/src/test/regress/sql/non_super_user_object_metadata.sql +++ b/src/test/regress/sql/non_super_user_object_metadata.sql @@ -179,7 +179,7 @@ SELECT create_distributed_function('metadata_dist_test_proc(integer, integer)', SELECT distribution_argument_index FROM citus.pg_dist_object 
WHERE objid = 'metadata_dist_test_proc'::regproc;
 SELECT * FROM run_command_on_workers($$ SELECT distribution_argument_index FROM citus.pg_dist_object WHERE objid = 'local_schema.metadata_dist_test_proc'::regproc;$$) ORDER BY 1,2;

--- Show that dropping schema doesn't affect the worker node
+-- Show that the schema is dropped on worker node as well
 DROP SCHEMA local_schema CASCADE;
 SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from citus.pg_dist_object) as obj_identifiers where obj_identifier::text like '%{local_schema}%';
diff --git a/src/test/regress/sql/object_propagation_debug.sql b/src/test/regress/sql/object_propagation_debug.sql
index 0b676bd38..566b80501 100644
--- a/src/test/regress/sql/object_propagation_debug.sql
+++ b/src/test/regress/sql/object_propagation_debug.sql
@@ -60,18 +60,5 @@ JOIN LATERAL
 ON TRUE
 ORDER BY 1;

--- find non-distributed dependencies of the local table test2
-CREATE SCHEMA objectprop2;
-create table objectprop2.test2(a int, b t1);
-SET search_path TO objectprop2;
-SELECT
-	pg_identify_object(t.classid, t.objid, t.objsubid)
-FROM
-	(SELECT * FROM pg_get_object_address('table', '{test2}', '{}')) as addr
-JOIN LATERAL
-	"object prop".citus_get_dependencies_for_object(addr.classid, addr.objid, addr.objsubid) as t(classid oid, objid oid, objsubid int)
-ON TRUE
-	ORDER BY 1;
-
 SET client_min_messages TO ERROR;
-DROP SCHEMA "object prop", objectprop2 CASCADE;
+DROP SCHEMA "object prop" CASCADE;
diff --git a/src/test/regress/sql/subquery_prepared_statements.sql b/src/test/regress/sql/subquery_prepared_statements.sql
index 1152f7212..4f20e6905 100644
--- a/src/test/regress/sql/subquery_prepared_statements.sql
+++ b/src/test/regress/sql/subquery_prepared_statements.sql
@@ -2,7 +2,6 @@
 -- test recursive planning functionality on prepared statements
 -- ===================================================================
 CREATE SCHEMA subquery_prepared_statements;
-SELECT run_command_on_workers('CREATE SCHEMA subquery_prepared_statements;');

 SET search_path TO subquery_prepared_statements, public;

From dc6c194916bfd469f6cfcda50151bf6577134b10 Mon Sep 17 00:00:00 2001
From: Önder Kalacı
Date: Thu, 10 Feb 2022 17:59:28 +0100
Subject: [PATCH 28/28] Show IDLE backends in citus_dist_stat_activity (#5700)

* Break the dependency on the CitusInitiatedBackend infrastructure

With this change, we start to show non-distributed backends as well in
citus_dist_stat_activity. I think that (a) it is essential for making
citus_lock_waits work for backends blocked on DDL commands, and (b) it
is more in line with what users expect.

The name of the view is a little inconsistent now (e.g.,
citus_dist_stat_activity) but we are already planning to improve the
names with follow-up PRs.

Also, now that we have global pids assigned, the CitusInitiatedBackend
infrastructure becomes obsolete.
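To make the new behavior concrete, here is a minimal two-session sketch. It is
hypothetical and not part of the patch: it assumes a running Citus coordinator
and uses only columns of citus_dist_stat_activity that the regression tests
below already select. Under the old query, a backend was listed only if it
matched a get_all_active_transactions() row with worker_query = false; under
the new query, any idle client backend shows up:

    -- session 1: open a transaction but run no distributed query,
    -- leaving the backend in state 'idle in transaction'
    BEGIN;

    -- session 2: after this change the idle backend above is listed;
    -- before, only backends that had initiated a distributed
    -- transaction appeared in the view
    SELECT query, state, wait_event_type, wait_event, usename, datname
    FROM citus_dist_stat_activity
    WHERE state = 'idle in transaction'
    ORDER BY query DESC;

Note that the monitoring query itself stays hidden, since the new view
definition filters out queries matching '%stat_activity%'.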
--- .../transaction/citus_dist_stat_activity.c | 9 ++- .../isolation_citus_dist_activity.out | 60 +++++++++---------- .../regress/expected/isolation_global_pid.out | 4 +- ...licate_reference_tables_to_coordinator.out | 13 ++-- .../spec/isolation_citus_dist_activity.spec | 2 +- .../regress/spec/isolation_global_pid.spec | 2 +- ...icate_reference_tables_to_coordinator.spec | 3 +- 7 files changed, 51 insertions(+), 42 deletions(-) diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c index 04014693e..3fb15295f 100644 --- a/src/backend/distributed/transaction/citus_dist_stat_activity.c +++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c @@ -155,7 +155,12 @@ FROM \ get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_pid) \ ON pg_stat_activity.pid = dist_txs.process_id \ WHERE \ - dist_txs.worker_query = false;" + backend_type = 'client backend' \ + AND \ + pg_stat_activity.query NOT ILIKE '%stat_activity%' \ + AND \ + pg_stat_activity.application_name NOT SIMILAR TO 'citus_internal gpid=\\d+'; \ +" #define CITUS_WORKER_STAT_ACTIVITY_QUERY \ "\ @@ -186,7 +191,7 @@ SELECT \ dist_txs.global_id \ FROM \ pg_stat_activity \ - LEFT JOIN \ + JOIN \ get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_id) \ ON pg_stat_activity.pid = dist_txs.process_id \ WHERE \ diff --git a/src/test/regress/expected/isolation_citus_dist_activity.out b/src/test/regress/expected/isolation_citus_dist_activity.out index c003d5ec9..f6cd7da9b 100644 --- a/src/test/regress/expected/isolation_citus_dist_activity.out +++ b/src/test/regress/expected/isolation_citus_dist_activity.out @@ -15,16 +15,16 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-alter-table: ALTER TABLE test_table ADD COLUMN x INT; step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -32,7 +32,7 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -42,7 +42,7 @@ query |query_hostname |query_hostport|d (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, 
distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -61,13 +61,13 @@ SELECT worker_apply_shard_ddl_command (1300001, 'public', ' (4 rows) step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-insert s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -85,16 +85,16 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-insert: - INSERT INTO test_table VALUES (100, 100); + INSERT INTO test_table VALUES (100, 100); step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -102,17 +102,17 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- - INSERT INTO test_table VALUES (100, 100); + INSERT INTO test_table VALUES (100, 100); |coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -120,13 +120,13 @@ INSERT INTO public.test_table_1300008 (column1, column2) VALUES (100, 100)|local (1 row) step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -144,10 +144,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + 
BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-select: SELECT count(*) FROM test_table; @@ -158,7 +158,7 @@ count (1 row) step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -166,7 +166,7 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -176,7 +176,7 @@ query |query_hostname |query_hostport|distribute (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -187,13 +187,13 @@ SELECT count(*) AS count FROM public.test_table_1300011 test_table WHERE true|lo (4 rows) step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; starting permutation: s1-cache-connections s1-begin s2-begin s3-begin s1-select-router s2-sleep s2-view-dist s3-view-worker s2-rollback s1-commit s3-rollback @@ -211,10 +211,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s3-begin: - BEGIN; + BEGIN; step s1-select-router: SELECT count(*) FROM test_table WHERE column1 = 55; @@ -225,7 +225,7 @@ count (1 row) step s2-sleep: - SELECT pg_sleep(0.5); + SELECT pg_sleep(0.5); pg_sleep --------------------------------------------------------------------- @@ -233,7 +233,7 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query 
DESC; query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -243,7 +243,7 @@ query |query_hostname |query_ (1 row) step s3-view-worker: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- @@ -251,11 +251,11 @@ SELECT count(*) AS count FROM public.test_table_1300017 test_table WHERE (column (1 row) step s2-rollback: - ROLLBACK; + ROLLBACK; step s1-commit: COMMIT; step s3-rollback: - ROLLBACK; + ROLLBACK; diff --git a/src/test/regress/expected/isolation_global_pid.out b/src/test/regress/expected/isolation_global_pid.out index 19e055079..99ca48693 100644 --- a/src/test/regress/expected/isolation_global_pid.out +++ b/src/test/regress/expected/isolation_global_pid.out @@ -31,7 +31,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-coordinator-citus_dist_stat_activity: - SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'; + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; ?column? --------------------------------------------------------------------- @@ -93,7 +93,7 @@ a|b (0 rows) step s2-coordinator-citus_dist_stat_activity: - SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'; + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; ?column? 
--------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index 04e427ce6..e83e71919 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -83,14 +83,17 @@ pg_sleep (1 row) step s2-view-dist: - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%pg_isolation_test_session_is_blocked%' AND query NOT ILIKE '%BEGIN%' ORDER BY query DESC; -query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname +query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- + SELECT check_distributed_deadlocks(); +|coordinator_host| 57636| | 0|idle |Client |ClientRead|postgres|regression + update ref_table set a = a + 1; -|coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -(1 row) + |coordinator_host| 57636|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +(2 rows) step s2-view-worker: SELECT query, query_hostname, query_hostport, distributed_query_host_name, @@ -101,7 +104,7 @@ step s2-view-worker: query NOT ILIKE '%dump_local_wait_edges%' ORDER BY query, query_hostport DESC; -query |query_hostname |query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname +query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression diff --git a/src/test/regress/spec/isolation_citus_dist_activity.spec b/src/test/regress/spec/isolation_citus_dist_activity.spec index 5d8558a84..c41c671f0 100644 --- a/src/test/regress/spec/isolation_citus_dist_activity.spec +++ b/src/test/regress/spec/isolation_citus_dist_activity.spec @@ -71,7 +71,7 @@ step "s2-sleep" step "s2-view-dist" { - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, 
distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' and query not ILIKE '%BEGIN%' and query NOT ILIKE '%pg_catalog.pg_isolation_test_session_is_blocked%' ORDER BY query DESC; } diff --git a/src/test/regress/spec/isolation_global_pid.spec b/src/test/regress/spec/isolation_global_pid.spec index 4306d2640..62f45d1e9 100644 --- a/src/test/regress/spec/isolation_global_pid.spec +++ b/src/test/regress/spec/isolation_global_pid.spec @@ -62,7 +62,7 @@ session "s2" step "s2-coordinator-citus_dist_stat_activity" { - SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%'; + SELECT global_pid != 0 FROM citus_dist_stat_activity() WHERE query LIKE '%SELECT * FROM dist\_table%' and query NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; } step "s2-coordinator-citus_worker_stat_activity" diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index 12f11bef0..0defcf549 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -83,7 +83,7 @@ step "s2-lock-ref-table-placement-on-coordinator" step "s2-view-dist" { - SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' ORDER BY query DESC; + SELECT query, query_hostname, query_hostport, distributed_query_host_name, distributed_query_host_port, state, wait_event_type, wait_event, usename, datname FROM citus_dist_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND query NOT ILIKE '%pg_isolation_test_session_is_blocked%' AND query NOT ILIKE '%BEGIN%' ORDER BY query DESC; } step "s2-view-worker" @@ -114,6 +114,7 @@ step "s2-active-transactions" // thus we manually issue the deadlock detection session "deadlock-checker" + // we issue the checker not only when there are deadlocks to ensure that we never cancel // backend inappropriately step "deadlock-checker-call"