From 1d55debb988d3e630205467f6e2e903294a5eb5d Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Mon, 8 Jan 2024 19:47:49 +0300 Subject: [PATCH] Support CREATE / DROP database commands from any node (#7359) DESCRIPTION: Adds support for issuing `CREATE`/`DROP` DATABASE commands from worker nodes With this commit, we allow issuing CREATE / DROP DATABASE commands from worker nodes too. As in #7278, this is not allowed when the coordinator is not added to metadata because we don't ever sync metadata changes to coordinator when adding coordinator to the metadata via `SELECT citus_set_coordinator_host('<coordinator-host>')`, or equivalently, via `SELECT citus_add_node(<coordinator-host>, <coordinator-port>, 0)`. We serialize database management commands by acquiring a Citus specific advisory lock on the first primary worker node if there are any workers in the cluster. As opposed to what we've done in https://github.com/citusdata/citus/pull/7278 for role management commands, we try to avoid running into distributed deadlocks as much as possible. This is because, while distributed deadlocks that can happen around role management commands can be detected by Citus, this is not the case for database management commands because most of them cannot be run inside a transaction block. In that case, Citus cannot even detect the distributed deadlock because the command is not part of a distributed transaction at all, then the command execution might not return control back to the user for an indefinite amount of time. 
--- src/backend/distributed/commands/database.c | 86 ++++- .../commands/distribute_object_ops.c | 2 +- .../distributed/commands/utility_hook.c | 4 +- .../distributed/metadata/node_metadata.c | 14 +- .../distributed/serialize_distributed_ddls.c | 275 ++++++++++++++ .../distributed/sql/citus--12.1-1--12.2-1.sql | 2 + .../sql/downgrades/citus--12.2-1--12.1-1.sql | 1 + .../12.2-1.sql | 5 + .../latest.sql | 5 + src/include/distributed/commands.h | 3 + src/include/distributed/resource_lock.h | 70 +++- .../distributed/serialize_distributed_ddls.h | 37 ++ src/include/pg_version_compat.h | 34 ++ src/test/regress/citus_tests/run_test.py | 5 + .../expected/alter_database_propagation.out | 4 +- .../expected/alter_role_propagation.out | 4 +- src/test/regress/expected/columnar_drop.out | 4 +- .../create_drop_database_propagation.out | 352 +++++++++++++++++- src/test/regress/expected/drop_database.out | 16 +- .../expected/failure_non_main_db_2pc.out | 8 +- .../grant_on_database_propagation.out | 4 +- .../isolation_database_cmd_from_any_node.out | 211 +++++++++++ src/test/regress/expected/multi_extension.out | 17 +- ...ransaction_recovery_multiple_databases.out | 24 +- .../expected/multi_utility_warnings.out | 5 +- src/test/regress/expected/other_databases.out | 8 +- src/test/regress/expected/pg16.out | 4 +- .../expected/role_command_from_any_node.out | 10 +- .../expected/upgrade_list_citus_objects.out | 3 +- src/test/regress/isolation_schedule | 1 + .../isolation_database_cmd_from_any_node.spec | 102 +++++ .../sql/create_drop_database_propagation.sql | 202 +++++++++- .../regress/sql/multi_utility_warnings.sql | 1 + 33 files changed, 1438 insertions(+), 85 deletions(-) create mode 100644 src/backend/distributed/serialize_distributed_ddls.c create mode 100644 src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/latest.sql create 
mode 100644 src/include/distributed/serialize_distributed_ddls.h create mode 100644 src/test/regress/expected/isolation_database_cmd_from_any_node.out create mode 100644 src/test/regress/spec/isolation_database_cmd_from_any_node.spec diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index fd83ae0fa..049af3a64 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -41,6 +41,7 @@ #include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" +#include "distributed/serialize_distributed_ddls.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" @@ -248,6 +249,9 @@ IsSetTablespaceStatement(AlterDatabaseStmt *stmt) * * In this stage we can prepare the commands that need to be run on all workers to grant * on databases. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. */ List * PreprocessAlterDatabaseStmt(Node *node, const char *queryString, @@ -264,6 +268,7 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString, } EnsureCoordinator(); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); char *sql = DeparseTreeNode((Node *) stmt); @@ -291,11 +296,14 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString, #if PG_VERSION_NUM >= PG_VERSION_15 /* - * PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local - * postgres instance. + * PreprocessAlterDatabaseRefreshCollStmt is executed before the statement is applied to + * the local postgres instance. * * In this stage we can prepare the commands that need to be run on all workers to grant * on databases. 
+ * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. */ List * PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString, @@ -312,6 +320,7 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString, } EnsureCoordinator(); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); char *sql = DeparseTreeNode((Node *) stmt); @@ -325,8 +334,51 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString, #endif + /* - * PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to the local + * PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to + * the local postgres instance. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. + * + * We acquire this lock here instead of PostprocessAlterDatabaseRenameStmt because the + * command renames the database and SerializeDistributedDDLsOnObjectClass resolves the + * object on workers based on database name. For this reason, we need to acquire the lock + * before the command is applied to the local postgres instance. + */ +List * +PreprocessAlterDatabaseRenameStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + bool missingOk = true; + RenameStmt *stmt = castNode(RenameStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->subname, + missingOk); + + if (!ShouldPropagate() || !IsAnyObjectDistributed(list_make1(dbAddress))) + { + return NIL; + } + + EnsureCoordinator(); + + /* + * Different than other ALTER DATABASE commands, we first acquire a lock + * by providing InvalidOid because we want ALTER TABLE .. RENAME TO .. 
+ * commands to block not only with ALTER DATABASE operations but also + * with CREATE DATABASE operations because they might cause name conflicts + * and that could also cause deadlocks too. + */ + SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->subname); + + return NIL; +} + + +/* + * PostprocessAlterDatabaseRenameStmt is executed after the statement is applied to the local * postgres instance. In this stage we prepare ALTER DATABASE RENAME statement to be run on * all workers. */ @@ -361,6 +413,9 @@ PostprocessAlterDatabaseRenameStmt(Node *node, const char *queryString) * * In this stage we can prepare the commands that need to be run on all workers to grant * on databases. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. */ List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, @@ -377,6 +432,7 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, } EnsureCoordinator(); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); char *sql = DeparseTreeNode((Node *) stmt); @@ -389,12 +445,15 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, /* - * PostprocessAlterDatabaseStmt is executed before the statement is applied to the local + * PreprocessCreateDatabaseStmt is executed before the statement is applied to the local * Postgres instance. * * In this stage, we perform validations that we want to ensure before delegating to * previous utility hooks because it might not be convenient to throw an error in an * implicit transaction that creates a database. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. 
*/ List * PreprocessCreateDatabaseStmt(Node *node, const char *queryString, @@ -405,11 +464,13 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); CreatedbStmt *stmt = castNode(CreatedbStmt, node); EnsureSupportedCreateDatabaseCommand(stmt); + SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE); + return NIL; } @@ -430,7 +491,7 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); /* * Given that CREATE DATABASE doesn't support "IF NOT EXISTS" and we're @@ -448,16 +509,19 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString) (void *) createDatabaseCommand, ENABLE_DDL_PROPAGATION); - return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands); } /* - * PreprocessDropDatabaseStmt is executed after the statement is applied to the local + * PreprocessDropDatabaseStmt is executed before the statement is applied to the local * postgres instance. In this stage we can prepare the commands that need to be run on * all workers to drop the database. Since the DROP DATABASE statement gives error in * transaction context, we need to use NontransactionalNodeDDLTaskList to send the * DROP DATABASE statement to the workers. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. 
*/ List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, @@ -468,7 +532,7 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); DropdbStmt *stmt = (DropdbStmt *) node; @@ -488,13 +552,15 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, return NIL; } + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); + char *dropDatabaseCommand = DeparseTreeNode(node); List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) dropDatabaseCommand, ENABLE_DDL_PROPAGATION); - return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands); } diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index eb454d70d..fe1f422b6 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -536,7 +536,7 @@ static DistributeObjectOps Database_Set = { static DistributeObjectOps Database_Rename = { .deparse = DeparseAlterDatabaseRenameStmt, .qualify = NULL, - .preprocess = NULL, + .preprocess = PreprocessAlterDatabaseRenameStmt, .postprocess = PostprocessAlterDatabaseRenameStmt, .objectType = OBJECT_DATABASE, .operationType = DIST_OPS_ALTER, diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 9e6b66e3e..c2155383a 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -726,9 +726,9 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, ereport(NOTICE, (errmsg("Citus partially supports CREATE DATABASE for " "distributed databases"), errdetail("Citus does not propagate CREATE DATABASE " - "command to workers"), + "command to other nodes"), errhint("You can manually create a database and its " - "extensions on 
workers."))); + "extensions on other nodes."))); } } else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation) diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index 2bd2b8a09..a2df0a410 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -2771,12 +2771,24 @@ EnsureCoordinatorIsInMetadata(void) { bool isCoordinatorInMetadata = false; PrimaryNodeForGroup(COORDINATOR_GROUP_ID, &isCoordinatorInMetadata); - if (!isCoordinatorInMetadata) + if (isCoordinatorInMetadata) + { + return; + } + + /* be more descriptive when we're not on coordinator */ + if (IsCoordinator()) { ereport(ERROR, (errmsg("coordinator is not added to the metadata"), errhint("Use SELECT citus_set_coordinator_host('') " "to configure the coordinator hostname"))); } + else + { + ereport(ERROR, (errmsg("coordinator is not added to the metadata"), + errhint("Use SELECT citus_set_coordinator_host('') " + "on coordinator to configure the coordinator hostname"))); + } } diff --git a/src/backend/distributed/serialize_distributed_ddls.c b/src/backend/distributed/serialize_distributed_ddls.c new file mode 100644 index 000000000..fa8d5ebc2 --- /dev/null +++ b/src/backend/distributed/serialize_distributed_ddls.c @@ -0,0 +1,275 @@ +/*------------------------------------------------------------------------- + * + * serialize_distributed_ddls.c + * + * This file contains functions for serializing distributed DDLs. 
+ * + * If you're adding support for serializing a new DDL, you should + * extend the following functions to support the new object class: + * AcquireCitusAdvisoryObjectClassLockGetOid() + * AcquireCitusAdvisoryObjectClassLockCheckPrivileges() + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "miscadmin.h" + +#include "catalog/dependency.h" +#include "catalog/pg_database_d.h" +#include "commands/dbcommands.h" +#include "storage/lock.h" +#include "utils/builtins.h" + +#include "pg_version_compat.h" + +#include "distributed/adaptive_executor.h" +#include "distributed/argutils.h" +#include "distributed/deparse_shard_query.h" +#include "distributed/resource_lock.h" +#include "distributed/serialize_distributed_ddls.h" + + +PG_FUNCTION_INFO_V1(citus_internal_acquire_citus_advisory_object_class_lock); + + +static void SerializeDistributedDDLsOnObjectClassInternal(ObjectClass objectClass, + char *qualifiedObjectName); +static char * AcquireCitusAdvisoryObjectClassLockCommand(ObjectClass objectClass, + char *qualifiedObjectName); +static void AcquireCitusAdvisoryObjectClassLock(ObjectClass objectClass, + char *qualifiedObjectName); +static Oid AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass, + char *qualifiedObjectName); +static void AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass, + Oid oid); + + +/* + * citus_internal_acquire_citus_advisory_object_class_lock is an internal UDF + * to call AcquireCitusAdvisoryObjectClassLock(). + */ +Datum +citus_internal_acquire_citus_advisory_object_class_lock(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + PG_ENSURE_ARGNOTNULL(0, "object_class"); + ObjectClass objectClass = PG_GETARG_INT32(0); + + char *qualifiedObjectName = PG_ARGISNULL(1) ? 
NULL : PG_GETARG_CSTRING(1); + + AcquireCitusAdvisoryObjectClassLock(objectClass, qualifiedObjectName); + + PG_RETURN_VOID(); +} + + +/* + * SerializeDistributedDDLsOnObjectClass is a wrapper around + * SerializeDistributedDDLsOnObjectClassInternal to acquire the lock on given + * object class itself, see the comment in header file for more details about + * the difference between this function and + * SerializeDistributedDDLsOnObjectClassObject(). + */ +void +SerializeDistributedDDLsOnObjectClass(ObjectClass objectClass) +{ + SerializeDistributedDDLsOnObjectClassInternal(objectClass, NULL); +} + + +/* + * SerializeDistributedDDLsOnObjectClassObject is a wrapper around + * SerializeDistributedDDLsOnObjectClassInternal to acquire the lock on given + * object that belongs to given object class, see the comment in header file + * for more details about the difference between this function and + * SerializeDistributedDDLsOnObjectClass(). + */ +void +SerializeDistributedDDLsOnObjectClassObject(ObjectClass objectClass, + char *qualifiedObjectName) +{ + if (qualifiedObjectName == NULL) + { + elog(ERROR, "qualified object name cannot be NULL"); + } + + SerializeDistributedDDLsOnObjectClassInternal(objectClass, qualifiedObjectName); +} + + +/* + * SerializeDistributedDDLsOnObjectClassInternal serializes distributed DDLs + * that target given object class by acquiring a Citus specific advisory lock + * on the first primary worker node if there are any workers in the cluster. + * + * The lock is acquired via a coordinated transaction. For this reason, + * it automatically gets released when (maybe implicit) transaction on + * current server commits or rolls back. + * + * If qualifiedObjectName is provided to be non-null, then the oid of the + * object is first resolved on the first primary worker node and then the + * lock is acquired on that oid. If qualifiedObjectName is null, then the + * lock is acquired on the object class itself. 
+ * + * Note that those two lock types don't conflict with each other and are + * acquired for different purposes. The lock on the object class + * (qualifiedObjectName = NULL) is used to serialize DDLs that target the + * object class itself, e.g., when creating a new object of that class, and + * the latter is used to serialize DDLs that target a specific object of + * that class, e.g., when altering an object. + * + * In some cases, we may want to acquire both locks at the same time. For + * example, when renaming a database, we want to acquire both lock types + * because while the object class lock is used to ensure that another session + * doesn't create a new database with the same name, the object lock is used + * to ensure that another session doesn't alter the same database. + */ +static void +SerializeDistributedDDLsOnObjectClassInternal(ObjectClass objectClass, + char *qualifiedObjectName) +{ + WorkerNode *firstWorkerNode = GetFirstPrimaryWorkerNode(); + if (firstWorkerNode == NULL) + { + /* + * If there are no worker nodes in the cluster, then we don't need + * to acquire the lock at all; and we cannot indeed. + */ + return; + } + + /* + * Indeed we would already ensure permission checks in remote node + * --via AcquireCitusAdvisoryObjectClassLock()-- but we first do so on + * the local node to avoid from reporting confusing error messages. 
+ */ + Oid oid = AcquireCitusAdvisoryObjectClassLockGetOid(objectClass, qualifiedObjectName); + AcquireCitusAdvisoryObjectClassLockCheckPrivileges(objectClass, oid); + + Task *task = CitusMakeNode(Task); + task->taskType = DDL_TASK; + + char *command = AcquireCitusAdvisoryObjectClassLockCommand(objectClass, + qualifiedObjectName); + SetTaskQueryString(task, command); + + ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement); + SetPlacementNodeMetadata(targetPlacement, firstWorkerNode); + task->taskPlacementList = list_make1(targetPlacement); + + /* need to be in a transaction to acquire a lock that's bound to transactions */ + UseCoordinatedTransaction(); + + bool localExecutionSupported = true; + ExecuteUtilityTaskList(list_make1(task), localExecutionSupported); +} + + +/* + * AcquireCitusAdvisoryObjectClassLockCommand returns a command to call + * pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(). + */ +static char * +AcquireCitusAdvisoryObjectClassLockCommand(ObjectClass objectClass, + char *qualifiedObjectName) +{ + /* safe to cast to int as it's an enum */ + int objectClassInt = (int) objectClass; + + char *quotedObjectName = + !qualifiedObjectName ? "NULL" : + quote_literal_cstr(qualifiedObjectName); + + StringInfo command = makeStringInfo(); + appendStringInfo(command, + "SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(%d, %s)", + objectClassInt, quotedObjectName); + + return command->data; +} + + +/* + * AcquireCitusAdvisoryObjectClassLock acquires a Citus specific advisory + * ExclusiveLock based on given object class. 
+ */ +static void +AcquireCitusAdvisoryObjectClassLock(ObjectClass objectClass, char *qualifiedObjectName) +{ + Oid oid = AcquireCitusAdvisoryObjectClassLockGetOid(objectClass, qualifiedObjectName); + + AcquireCitusAdvisoryObjectClassLockCheckPrivileges(objectClass, oid); + + LOCKTAG locktag; + SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(locktag, objectClass, oid); + + LOCKMODE lockmode = ExclusiveLock; + bool sessionLock = false; + bool dontWait = false; + LockAcquire(&locktag, lockmode, sessionLock, dontWait); +} + + +/* + * AcquireCitusAdvisoryObjectClassLockGetOid returns the oid of given object + * that belongs to given object class. If qualifiedObjectName is NULL, then + * it returns InvalidOid. + */ +static Oid +AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass, + char *qualifiedObjectName) +{ + if (qualifiedObjectName == NULL) + { + return InvalidOid; + } + + bool missingOk = false; + + switch (objectClass) + { + case OCLASS_DATABASE: + { + return get_database_oid(qualifiedObjectName, missingOk); + } + + default: + elog(ERROR, "unsupported object class: %d", objectClass); + } +} + + +/* + * AcquireCitusAdvisoryObjectClassLockCheckPrivileges is used to perform privilege checks + * before acquiring the Citus specific advisory lock on given object class and oid. 
+ */ +static void +AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass, Oid oid) +{ + switch (objectClass) + { + case OCLASS_DATABASE: + { + if (OidIsValid(oid) && !object_ownercheck(DatabaseRelationId, oid, + GetUserId())) + { + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, + get_database_name(oid)); + } + else if (!OidIsValid(oid) && !have_createdb_privilege()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied to create / rename database"))); + } + + break; + } + + default: + elog(ERROR, "unsupported object class: %d", objectClass); + } +} diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index 2ce2d7a21..72ef46e6f 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -10,3 +10,5 @@ #include "udfs/commit_management_command_2pc/12.2-1.sql" ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8; + +#include "udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index 0a6f68b06..f889a0095 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -1,6 +1,7 @@ -- citus--12.2-1--12.1-1 DROP FUNCTION pg_catalog.citus_internal_database_command(text); +DROP FUNCTION pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(int, cstring); #include "../udfs/citus_add_rebalance_strategy/10.1-1.sql" diff --git a/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql new file mode 100644 index 000000000..8dc6f940e --- /dev/null +++ 
b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql @@ -0,0 +1,5 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(objectClass int, qualifiedObjectName cstring) + RETURNS void + LANGUAGE C + VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_acquire_citus_advisory_object_class_lock$$; diff --git a/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/latest.sql new file mode 100644 index 000000000..8dc6f940e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/latest.sql @@ -0,0 +1,5 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(objectClass int, qualifiedObjectName cstring) + RETURNS void + LANGUAGE C + VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_acquire_citus_advisory_object_class_lock$$; diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 99bf81843..4eb6df8bf 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -245,6 +245,9 @@ extern List * DropDatabaseStmtObjectAddress(Node *node, bool missingOk, extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess); extern List * GenerateGrantDatabaseCommandList(void); +extern List * PreprocessAlterDatabaseRenameStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); extern List * PostprocessAlterDatabaseRenameStmt(Node *node, const char *queryString); extern void EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt); extern char * CreateDatabaseDDLCommand(Oid dbId); diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h index 8e0e36735..576d2bf15 100644 --- 
a/src/include/distributed/resource_lock.h +++ b/src/include/distributed/resource_lock.h @@ -14,6 +14,7 @@ #include "c.h" +#include "catalog/dependency.h" #include "nodes/pg_list.h" #include "storage/lock.h" #include "tcop/utility.h" @@ -45,7 +46,8 @@ typedef enum AdvisoryLocktagClass ADV_LOCKTAG_CLASS_CITUS_CLEANUP_OPERATION_ID = 10, ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12, ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION = 13, - ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK = 14 + ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK = 14, + ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION = 15 } AdvisoryLocktagClass; /* CitusOperations has constants for citus operations */ @@ -142,6 +144,72 @@ typedef enum CitusOperations (uint32) (taskId), \ ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK) +/* + * IsNodeWideObjectClass returns true if the given object class is node-wide, + * i.e., that is not bound to a particular database but to whole server. + * + * Defined here as an inlined function so that SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION + * macro can use it. + */ +static inline bool +IsNodeWideObjectClass(ObjectClass objectClass) +{ + if ((int) objectClass < 0 || objectClass > LAST_OCLASS) + { + elog(ERROR, "invalid object class: %d", objectClass); + } + + /* + * We don't expect Postgres to change an object class to a node-wide one in the + * future, but a newly added object class may be node-wide. + * + * So we put a static assert here to make sure that the developer who adds support + * for a new Postgres version is aware of this. + * + * If new object classes are added and none of them are node-wide, then update + * this assertion check based on latest supported major Postgres version. 
+ */ + StaticAssertStmt(PG_MAJORVERSION_NUM <= 16, + "better to check if any of newly added ObjectClass'es are node-wide"); + + switch (objectClass) + { + case OCLASS_ROLE: + case OCLASS_DATABASE: + case OCLASS_TBLSPACE: +#if PG_VERSION_NUM >= PG_VERSION_15 + case OCLASS_PARAMETER_ACL: +#endif +#if PG_VERSION_NUM >= PG_VERSION_16 + case OCLASS_ROLE_MEMBERSHIP: +#endif + { + return true; + } + + default: + return false; + } +} + + +/* + * Automatically sets databaseId to InvalidOid if the object class is + * node-wide, i.e., that is not bound to a particular database but to + * whole server. If the object class is not node-wide, sets databaseId + * to MyDatabaseId. + * + * That way, the lock is local to each database if the object class is + * not node-wide, and global if it is. + */ +#define SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(tag, objectClass, oid) \ + SET_LOCKTAG_ADVISORY(tag, \ + (uint32) (IsNodeWideObjectClass(objectClass) ? InvalidOid : \ + MyDatabaseId), \ + (uint32) objectClass, \ + (uint32) oid, \ + ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION) + /* * DistLockConfigs are used to configure the locking behaviour of AcquireDistributedLockOnRelations */ diff --git a/src/include/distributed/serialize_distributed_ddls.h b/src/include/distributed/serialize_distributed_ddls.h new file mode 100644 index 000000000..c62c617d1 --- /dev/null +++ b/src/include/distributed/serialize_distributed_ddls.h @@ -0,0 +1,37 @@ +/*------------------------------------------------------------------------- + * + * serialize_distributed_ddls.h + * + * Declarations for public functions related to serializing distributed + * DDLs. + * + *------------------------------------------------------------------------- + */ + +#ifndef SERIALIZE_DDLS_OVER_CATALOG_H +#define SERIALIZE_DDLS_OVER_CATALOG_H + +#include "postgres.h" + +#include "catalog/dependency.h" + +/* + * Note that those two lock types don't conflict with each other and are + * acquired for different purposes. 
The lock on the object class + * --SerializeDistributedDDLsOnObjectClass()-- is used to serialize DDLs + * that target the object class itself, e.g., when creating a new object + * of that class, and the latter one --SerializeDistributedDDLsOnObjectClassObject()-- + * is used to serialize DDLs that target a specific object of that class, + * e.g., when altering an object. + * + * In some cases, we may want to acquire both locks at the same time. For + * example, when renaming a database, we want to acquire both lock types + * because while the object class lock is used to ensure that another session + * doesn't create a new database with the same name, the object lock is used + * to ensure that another session doesn't alter the same database. + */ +extern void SerializeDistributedDDLsOnObjectClass(ObjectClass objectClass); +extern void SerializeDistributedDDLsOnObjectClassObject(ObjectClass objectClass, + char *qualifiedObjectName); + +#endif /* SERIALIZE_DDLS_OVER_CATALOG_H */ diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index 4e874e2ee..665cd30c2 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -48,14 +48,21 @@ get_guc_variables_compat(int *gucCount) #define pgstat_fetch_stat_local_beentry(a) pgstat_get_local_beentry_by_index(a) +#define have_createdb_privilege() have_createdb_privilege() + #else +#include "miscadmin.h" + +#include "catalog/pg_authid.h" #include "catalog/pg_class_d.h" +#include "catalog/pg_database_d.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc_d.h" #include "storage/relfilenode.h" #include "utils/guc.h" #include "utils/guc_tables.h" +#include "utils/syscache.h" #define pg_clean_ascii_compat(a, b) pg_clean_ascii(a) @@ -105,6 +112,11 @@ object_ownercheck(Oid classid, Oid objectid, Oid roleid) return pg_proc_ownercheck(objectid, roleid); } + case DatabaseRelationId: + { + return pg_database_ownercheck(objectid, roleid); + } + default: { ereport(ERROR, @@ -140,6 
+152,28 @@ object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode) } +static inline bool +have_createdb_privilege(void) +{ + bool result = false; + HeapTuple utup; + + /* Superusers can always do everything */ + if (superuser()) + { + return true; + } + + utup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(GetUserId())); + if (HeapTupleIsValid(utup)) + { + result = ((Form_pg_authid) GETSTRUCT(utup))->rolcreatedb; + ReleaseSysCache(utup); + } + return result; +} + + typedef bool TU_UpdateIndexes; /* diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index 9979a2494..b30d3edfa 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -204,6 +204,11 @@ DEPS = { "minimal_schedule", ["remove_coordinator_from_metadata"] ), "limit_intermediate_size": TestDeps("base_schedule"), + "columnar_drop": TestDeps( + "minimal_schedule", + ["columnar_create", "columnar_load"], + repeatable=False, + ), } diff --git a/src/test/regress/expected/alter_database_propagation.out b/src/test/regress/expected/alter_database_propagation.out index 1a56f1338..f01d39ab9 100644 --- a/src/test/regress/expected/alter_database_propagation.out +++ b/src/test/regress/expected/alter_database_propagation.out @@ -182,8 +182,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx set citus.enable_create_database_propagation=off; CREATE database local_regression; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
alter DATABASE local_regression with CONNECTION LIMIT 100; alter DATABASE local_regression rename to local_regression2; drop database local_regression2; diff --git a/src/test/regress/expected/alter_role_propagation.out b/src/test/regress/expected/alter_role_propagation.out index 82310f477..4beea2a51 100644 --- a/src/test/regress/expected/alter_role_propagation.out +++ b/src/test/regress/expected/alter_role_propagation.out @@ -254,8 +254,8 @@ SELECT run_command_on_workers('SHOW enable_hashagg'); -- also test case sensitivity CREATE DATABASE "REGRESSION"; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. ALTER ROLE CURRENT_USER IN DATABASE "REGRESSION" SET public.myguc TO "Hello from coordinator only"; SELECT d.datname, r.setconfig FROM pg_db_role_setting r LEFT JOIN pg_database d ON r.setdatabase=d.oid WHERE r.setconfig::text LIKE '%Hello from coordinator only%'; datname | setconfig diff --git a/src/test/regress/expected/columnar_drop.out b/src/test/regress/expected/columnar_drop.out index 75333c1e8..2e7998b69 100644 --- a/src/test/regress/expected/columnar_drop.out +++ b/src/test/regress/expected/columnar_drop.out @@ -39,8 +39,8 @@ SELECT :columnar_stripes_before_drop - count(distinct storage_id) FROM columnar. SELECT current_database() datname \gset CREATE DATABASE db_to_drop; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
\c db_to_drop CREATE EXTENSION citus_columnar; SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 8104f6666..6c9be95bc 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -64,8 +64,8 @@ CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace \c - - - :master_port CREATE DATABASE local_database; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. -- check that it's only created for coordinator SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; node_type | result @@ -88,8 +88,8 @@ SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node \c - - - :worker_1_port CREATE DATABASE local_database; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
-- check that it's only created for coordinator SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; node_type | result @@ -452,18 +452,14 @@ drop database "mydatabase#1'2"; ERROR: database "mydatabase#1'2" does not exist \c - - - :worker_1_port SET citus.enable_create_database_propagation TO ON; --- show that dropping the database from workers is not allowed when citus.enable_create_database_propagation is on +-- show that dropping the database from workers is allowed when citus.enable_create_database_propagation is on DROP DATABASE db_needs_escape; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -- and the same applies to create database too create database error_test; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. +drop database error_test; \c - - - :master_port SET citus.enable_create_database_propagation TO ON; DROP DATABASE test_node_activation; -DROP DATABASE db_needs_escape; DROP USER "role-needs\!escape"; -- drop database with force options test create database db_force_test; @@ -494,8 +490,8 @@ select 1 from citus_remove_node('localhost', :worker_2_port); SET citus.enable_create_database_propagation TO off; CREATE DATABASE non_distributed_db; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
SET citus.enable_create_database_propagation TO on; create database distributed_db; select 1 from citus_add_node('localhost', :worker_2_port); @@ -932,8 +928,8 @@ DROP ROLE propagated_role, non_propagated_role; SET citus.enable_create_database_propagation TO OFF; CREATE DATABASE local_database_1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. SET citus.enable_create_database_propagation TO ON; CREATE ROLE local_role_1; GRANT CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 TO local_role_1; @@ -941,6 +937,334 @@ ALTER DATABASE local_database_1 SET default_transaction_read_only = 'true'; REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1; DROP ROLE local_role_1; DROP DATABASE local_database_1; +-- test create / drop database commands from workers +-- remove one of the workers to test node activation too +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +CREATE DATABASE local_worker_db; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE db_created_from_worker + WITH template=template1 + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + CONNECTION LIMIT = 42 + TABLESPACE = "ts-needs\!escape" + ALLOW_CONNECTIONS = false; +\c - - - :master_port +SET citus.enable_create_database_propagation TO ON; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation TO ON; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "local_worker_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, 
"daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE db_created_from_worker; +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- drop the local database while the GUC is on 
+DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE local_worker_db; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +-- drop the local database while the GUC is off +DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE another_db_created_from_worker; +\c - - - :master_port +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation TO ON; +-- fails because coordinator is not added into metadata +DROP DATABASE another_db_created_from_worker; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +-- fails because coordinator is not added into metadata +CREATE DATABASE new_db; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +\c - - - :master_port +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +SET citus.enable_create_database_propagation TO ON; +-- dropping a database that was created from a worker via a different node works fine +DROP DATABASE another_db_created_from_worker; +SELECT * FROM public.check_database_on_all_nodes('another_db_created_from_worker') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- Show that we automatically propagate the dependencies (only roles atm) when +-- creating a database from workers too. +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +set citus.enable_create_role_propagation TO off; +create role non_propagated_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +set citus.enable_create_role_propagation TO on; +set citus.enable_create_database_propagation TO on; +create database test_db OWNER non_propagated_role; +create role propagated_role; +\c - - - :master_port +-- not supported from workers, so need to execute this via coordinator +grant connect on database test_db to propagated_role; +SET citus.enable_create_database_propagation TO ON; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, 
"daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +REVOKE CONNECT ON DATABASE test_db FROM propagated_role; +DROP DATABASE test_db; +DROP ROLE propagated_role, non_propagated_role; +-- test pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock with null input +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(null, 'regression'); +ERROR: object_class cannot be NULL +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), null); + citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +-- OCLASS_DATABASE +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), NULL); + citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 
26 END AS oclass_database), 'regression'); + citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), ''); +ERROR: database "" does not exist +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'no_such_db'); +ERROR: database "no_such_db" does not exist +-- invalid OCLASS +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, NULL); +ERROR: unsupported object class: -1 +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, 'regression'); +ERROR: unsupported object class: -1 +-- invalid OCLASS +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, NULL); +ERROR: unsupported object class: 100 +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, 'regression'); +ERROR: unsupported object class: 100 +-- another valid OCLASS, but not implemented yet +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, NULL); +ERROR: unsupported object class: 10 +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, 'regression'); +ERROR: unsupported object class: 10 +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO ON'); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + ?column? 
+--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +-- only one of them succeeds and we don't run into a distributed deadlock +SELECT COUNT(*) FROM run_command_on_all_nodes('CREATE DATABASE concurrent_create_db') WHERE success; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, 
"pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT COUNT(*) FROM run_command_on_all_nodes('DROP DATABASE concurrent_create_db') WHERE success; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- revert the system wide change that enables citus.enable_create_database_propagation on all nodes +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO OFF'); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + ?column? 
+--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +-- but keep it enabled for coordinator for the rest of the tests +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE distributed_db; +CREATE USER no_createdb; +SET ROLE no_createdb; +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE no_createdb; +ERROR: permission denied to create / rename database +ALTER DATABASE distributed_db RENAME TO rename_test; +ERROR: permission denied to create / rename database +DROP DATABASE distributed_db; +ERROR: must be owner of database distributed_db +ALTER DATABASE distributed_db SET TABLESPACE pg_default; +ERROR: must be owner of database distributed_db +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ERROR: must be owner of database distributed_db +ALTER DATABASE distributed_db RESET timezone; +ERROR: must be owner of database distributed_db +GRANT ALL ON DATABASE distributed_db TO postgres; +WARNING: no privileges were granted for "distributed_db" +RESET ROLE; +ALTER ROLE no_createdb createdb; +SET ROLE no_createdb; +CREATE DATABASE no_createdb; +ALTER DATABASE distributed_db RENAME TO rename_test; +ERROR: must be owner of database distributed_db +RESET ROLE; +SELECT 1 FROM run_command_on_all_nodes($$GRANT ALL ON TABLESPACE pg_default TO no_createdb$$); + ?column? 
+--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +ALTER DATABASE distributed_db OWNER TO no_createdb; +SET ROLE no_createdb; +ALTER DATABASE distributed_db SET TABLESPACE pg_default; +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ALTER DATABASE distributed_db RESET timezone; +GRANT ALL ON DATABASE distributed_db TO postgres; +ALTER DATABASE distributed_db RENAME TO rename_test; +DROP DATABASE rename_test; +RESET ROLE; +SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FROM no_createdb$$); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +DROP DATABASE no_createdb; +DROP USER no_createdb; +SET citus.enable_create_database_propagation TO ON; --clean up resources created by this test -- DROP TABLESPACE is not supported, so we need to drop it manually. SELECT result FROM run_command_on_all_nodes( diff --git a/src/test/regress/expected/drop_database.out b/src/test/regress/expected/drop_database.out index d150cc8d3..4d68f4c0d 100644 --- a/src/test/regress/expected/drop_database.out +++ b/src/test/regress/expected/drop_database.out @@ -6,14 +6,14 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 35137400; CREATE DATABASE citus_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c citus_created CREATE EXTENSION citus; CREATE DATABASE citus_not_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. 
+DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c citus_not_created DROP DATABASE citus_created; \c regression @@ -26,14 +26,14 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 35137400; CREATE DATABASE citus_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c citus_created CREATE EXTENSION citus; CREATE DATABASE citus_not_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c citus_not_created DROP DATABASE citus_created; \c regression diff --git a/src/test/regress/expected/failure_non_main_db_2pc.out b/src/test/regress/expected/failure_non_main_db_2pc.out index 1e8558136..673ce45a5 100644 --- a/src/test/regress/expected/failure_non_main_db_2pc.out +++ b/src/test/regress/expected/failure_non_main_db_2pc.out @@ -8,8 +8,8 @@ CREATE SCHEMA failure_non_main_db_2pc; SET SEARCH_PATH TO 'failure_non_main_db_2pc'; CREATE DATABASE other_db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()'); mitmproxy --------------------------------------------------------------------- @@ -101,8 +101,8 @@ SELECT citus_set_coordinator_host('localhost'); \c - - - :worker_1_port CREATE DATABASE other_db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/grant_on_database_propagation.out b/src/test/regress/expected/grant_on_database_propagation.out index b3b8b4b90..6fb363cfa 100644 --- a/src/test/regress/expected/grant_on_database_propagation.out +++ b/src/test/regress/expected/grant_on_database_propagation.out @@ -542,8 +542,8 @@ create user myuser; create user myuser_1; create database test_db; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
SELECT result FROM run_command_on_workers($$create database test_db$$); result --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_database_cmd_from_any_node.out b/src/test/regress/expected/isolation_database_cmd_from_any_node.out new file mode 100644 index 000000000..5ca30fb1e --- /dev/null +++ b/src/test/regress/expected/isolation_database_cmd_from_any_node.out @@ -0,0 +1,211 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock s1-commit s2-commit +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; +step s1-commit: COMMIT; +step s2-acquire-citus-adv-oclass-lock: <... 
completed> +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: COMMIT; + +starting permutation: s1-create-testdb1 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb1 s1-commit s2-commit s1-drop-testdb1 +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; +step s1-commit: COMMIT; +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb1: <... 
completed> +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: COMMIT; +step s1-drop-testdb1: DROP DATABASE testdb1; + +starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s1-drop-testdb1 s2-drop-testdb2 +step s1-create-testdb1: CREATE DATABASE testdb1; +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb2: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database; +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: COMMIT; +step s2-commit: COMMIT; +step s1-drop-testdb1: DROP DATABASE testdb1; +step s2-drop-testdb2: DROP DATABASE testdb2; + +starting permutation: s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s2-drop-testdb2 +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb2: SELECT 
citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database; +citus_internal_acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: COMMIT; +step s2-commit: COMMIT; +step s2-drop-testdb2: DROP DATABASE testdb2; + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; +step s1-create-db1: CREATE DATABASE db1; +step s2-rollback: ROLLBACK; +step s2-drop-testdb2: DROP DATABASE testdb2; +step s1-drop-db1: DROP DATABASE db1; + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-user-dbuser s1-grant-on-testdb2-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-user-dbuser +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; +step s1-create-user-dbuser: CREATE USER dbuser; +step s1-grant-on-testdb2-to-dbuser: GRANT ALL ON DATABASE testdb2 TO dbuser; +step s2-rollback: ROLLBACK; +step s2-drop-testdb2: DROP DATABASE testdb2; +step s1-drop-user-dbuser: DROP USER dbuser; + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-testdb1 s1-create-user-dbuser s1-grant-on-testdb1-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-testdb1 s1-drop-user-dbuser +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-create-user-dbuser: CREATE USER dbuser; +step s1-grant-on-testdb1-to-dbuser: GRANT ALL ON DATABASE testdb1 TO dbuser; +step s2-rollback: ROLLBACK; +step s2-drop-testdb2: DROP 
DATABASE testdb2; +step s1-drop-testdb1: DROP DATABASE testdb1; +step s1-drop-user-dbuser: DROP USER dbuser; + +starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-commit s2-rollback s1-drop-db1 s2-drop-testdb2 +step s1-create-testdb1: CREATE DATABASE testdb1; +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-commit: COMMIT; +step s2-alter-testdb2-rename-to-db1: <... completed> +ERROR: database "db1" already exists +step s2-rollback: ROLLBACK; +step s1-drop-db1: DROP DATABASE db1; +step s2-drop-testdb2: DROP DATABASE testdb2; + +starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-rollback s2-commit s1-drop-testdb1 s2-drop-db1 +step s1-create-testdb1: CREATE DATABASE testdb1; +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-rollback: ROLLBACK; +step s2-alter-testdb2-rename-to-db1: <... completed> +step s2-commit: COMMIT; +step s1-drop-testdb1: DROP DATABASE testdb1; +step s2-drop-db1: DROP DATABASE db1; + +starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-commit s2-rollback s1-drop-db1 +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s1-commit: COMMIT; +step s2-alter-testdb1-rename-to-db1: <... 
completed> +ERROR: database "testdb1" does not exist +step s2-rollback: ROLLBACK; +step s1-drop-db1: DROP DATABASE db1; + +starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-rollback s2-commit s2-drop-db1 +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s1-rollback: ROLLBACK; +step s2-alter-testdb1-rename-to-db1: <... completed> +step s2-commit: COMMIT; +step s2-drop-db1: DROP DATABASE db1; + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-create-db1: CREATE DATABASE db1; +step s2-rollback: ROLLBACK; +step s1-create-db1: <... completed> +step s2-drop-testdb2: DROP DATABASE testdb2; +step s1-drop-db1: DROP DATABASE db1; + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-commit s2-drop-db1 +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-create-db1: CREATE DATABASE db1; +step s2-commit: COMMIT; +step s1-create-db1: <... completed> +ERROR: database "db1" already exists +step s2-drop-db1: DROP DATABASE db1; + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db2 s1-create-db1 s2-commit s2-drop-db2 s1-drop-db1 +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; +step s1-create-db1: CREATE DATABASE db1; +step s2-commit: COMMIT; +step s1-create-db1: <... 
completed> +step s2-drop-db2: DROP DATABASE db2; +step s1-drop-db1: DROP DATABASE db1; + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-drop-testdb2 s2-rollback +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-drop-testdb2: DROP DATABASE testdb2; +step s2-rollback: ROLLBACK; +step s1-drop-testdb2: <... completed> + +starting permutation: s2-create-testdb2 s1-create-db1 s2-begin s2-alter-testdb2-rename-to-db2 s1-drop-db1 s2-commit s2-drop-db2 +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-create-db1: CREATE DATABASE db1; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; +step s1-drop-db1: DROP DATABASE db1; +step s2-commit: COMMIT; +step s2-drop-db2: DROP DATABASE db2; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index b9f489a1f..60e283800 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1420,14 +1420,15 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 12.2-1 ALTER EXTENSION citus UPDATE TO '12.2-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object + previous_object | current_object --------------------------------------------------------------------- | function citus_internal.commit_management_command_2pc() void | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void | function citus_internal.mark_object_distributed(oid,text,oid) void | function citus_internal.start_management_transaction(xid8) void + | function citus_internal_acquire_citus_advisory_object_class_lock(integer,cstring) void | function citus_internal_database_command(text) void -(5 rows) +(6 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; 
-- show running version @@ -1623,8 +1624,8 @@ CREATE EXTENSION citus; -- Check that maintenance daemon can also be started in another database CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c another CREATE EXTENSION citus; CREATE SCHEMA test; @@ -1682,13 +1683,13 @@ NOTICE: drop cascades to function test_daemon.maintenance_daemon_died(text) -- create a test database, configure citus with single node CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c - - - :worker_1_port CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
\c - - - :master_port \c another CREATE EXTENSION citus; diff --git a/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out b/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out index 2e396da7d..a3a374131 100644 --- a/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out +++ b/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out @@ -33,8 +33,8 @@ $definition$ create_function_test_maintenance_worker \gset CREATE DATABASE db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. SELECT oid AS db1_oid FROM pg_database WHERE datname = 'db1' @@ -42,13 +42,13 @@ WHERE datname = 'db1' \c - - - :worker_1_port CREATE DATABASE db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c - - - :worker_2_port CREATE DATABASE db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
\c db1 - - :worker_1_port CREATE EXTENSION citus; \c db1 - - :worker_2_port @@ -94,8 +94,8 @@ FROM pg_dist_node; CREATE DATABASE db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. SELECT oid AS db2_oid FROM pg_database WHERE datname = 'db2' @@ -103,13 +103,13 @@ WHERE datname = 'db2' \c - - - :worker_1_port CREATE DATABASE db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c - - - :worker_2_port CREATE DATABASE db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c db2 - - :worker_1_port CREATE EXTENSION citus; \c db2 - - :worker_2_port diff --git a/src/test/regress/expected/multi_utility_warnings.out b/src/test/regress/expected/multi_utility_warnings.out index 89899b0f1..880614c58 100644 --- a/src/test/regress/expected/multi_utility_warnings.out +++ b/src/test/regress/expected/multi_utility_warnings.out @@ -5,5 +5,6 @@ -- databases. 
CREATE DATABASE new_database; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +DROP DATABASE new_database; diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index 67d7dad3f..1b81af3b7 100644 --- a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -3,8 +3,8 @@ SET search_path TO other_databases; SET citus.next_shard_id TO 10231023; CREATE DATABASE other_db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c other_db1 SHOW citus.main_db; citus.main_db @@ -94,8 +94,8 @@ DROP USER other_db_user9, nonsuperuser; \c - - - :worker_1_port CREATE DATABASE other_db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
\c other_db2 CREATE USER worker_user1; BEGIN; diff --git a/src/test/regress/expected/pg16.out b/src/test/regress/expected/pg16.out index 6ed218cb8..a035fcfc4 100644 --- a/src/test/regress/expected/pg16.out +++ b/src/test/regress/expected/pg16.out @@ -207,8 +207,8 @@ DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VAL -- https://github.com/postgres/postgres/commit/30a53b7 CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0'; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. NOTICE: using standard form "und" for ICU locale "" SELECT result FROM run_command_on_workers ($$CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0'$$); diff --git a/src/test/regress/expected/role_command_from_any_node.out b/src/test/regress/expected/role_command_from_any_node.out index e8700a204..a5e22f40b 100644 --- a/src/test/regress/expected/role_command_from_any_node.out +++ b/src/test/regress/expected/role_command_from_any_node.out @@ -255,19 +255,19 @@ SELECT citus_remove_node('localhost', :master_port); -- they fail because the coordinator is not added to metadata DROP ROLE test_role_renamed; ERROR: coordinator is not added to the metadata -HINT: Use SELECT citus_set_coordinator_host('') to configure the coordinator hostname +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname ALTER ROLE test_role_renamed RENAME TO test_role; ERROR: coordinator is not added to the metadata -HINT: Use SELECT citus_set_coordinator_host('') to configure the coordinator hostname +HINT: Use SELECT 
citus_set_coordinator_host('') on coordinator to configure the coordinator hostname ALTER ROLE test_role_renamed CREATEDB; ERROR: coordinator is not added to the metadata -HINT: Use SELECT citus_set_coordinator_host('') to configure the coordinator hostname +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; ERROR: coordinator is not added to the metadata -HINT: Use SELECT citus_set_coordinator_host('') to configure the coordinator hostname +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname GRANT another_user TO test_role_renamed; ERROR: coordinator is not added to the metadata -HINT: Use SELECT citus_set_coordinator_host('') to configure the coordinator hostname +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname \c - - - :master_port DROP ROLE test_role_renamed, another_user; SET client_min_messages TO WARNING; diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 6d41ac058..97e5c0928 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -67,6 +67,7 @@ ORDER BY 1; function citus_internal.replace_isolation_tester_func() function citus_internal.restore_isolation_tester_func() function citus_internal.start_management_transaction(xid8) + function citus_internal_acquire_citus_advisory_object_class_lock(integer,cstring) function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") @@ -348,5 +349,5 @@ ORDER BY 1; view citus_stat_tenants_local view pg_dist_shard_placement view time_partitions 
-(338 rows) +(339 rows) diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index d8cc77c73..1b0f1427a 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -77,6 +77,7 @@ test: isolation_global_pid test: isolation_citus_locks test: isolation_reference_table test: isolation_schema_based_sharding +test: isolation_database_cmd_from_any_node test: isolation_citus_pause_node test: isolation_citus_schema_distribute_undistribute diff --git a/src/test/regress/spec/isolation_database_cmd_from_any_node.spec b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec new file mode 100644 index 000000000..85dc27433 --- /dev/null +++ b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec @@ -0,0 +1,102 @@ +setup +{ + -- OCLASS for database changed in PG 16 from 25 to 26 + SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS value INTO oclass_database; +} + +teardown +{ + DROP TABLE IF EXISTS oclass_database; +} + +session "s1" + +setup { SET citus.enable_create_database_propagation TO ON; } + +step "s1-begin" { BEGIN; } +step "s1-commit" { COMMIT; } +step "s1-rollback" { ROLLBACK; } + +step "s1-create-user-dbuser" { CREATE USER dbuser; } +step "s1-drop-user-dbuser" { DROP USER dbuser; } + +step "s1-acquire-citus-adv-oclass-lock" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; } +step "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; } + +step "s1-create-testdb1" { CREATE DATABASE testdb1; } +step "s1-drop-testdb1" { DROP DATABASE testdb1; } +step "s1-alter-testdb1-rename-to-db1" { ALTER DATABASE testdb1 RENAME TO db1; } +step "s1-grant-on-testdb1-to-dbuser" { GRANT ALL ON DATABASE testdb1 TO dbuser;} + +step "s1-drop-testdb2" { DROP DATABASE testdb2; } +step "s1-grant-on-testdb2-to-dbuser" { GRANT ALL 
ON DATABASE testdb2 TO dbuser;} + +step "s1-create-db1" { CREATE DATABASE db1; } +step "s1-drop-db1" { DROP DATABASE db1; } + +session "s2" + +setup { SET citus.enable_create_database_propagation TO ON; } + +step "s2-begin" { BEGIN; } +step "s2-commit" { COMMIT; } +step "s2-rollback" { ROLLBACK; } + +step "s2-acquire-citus-adv-oclass-lock" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; } +step "s2-acquire-citus-adv-oclass-lock-with-oid-testdb1" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; } +step "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" { SELECT citus_internal_acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database; } + +step "s2-alter-testdb1-rename-to-db1" { ALTER DATABASE testdb1 RENAME TO db1; } + +step "s2-create-testdb2" { CREATE DATABASE testdb2; } +step "s2-drop-testdb2" { DROP DATABASE testdb2; } +step "s2-alter-testdb2-rename-to-db1" { ALTER DATABASE testdb2 RENAME TO db1; } +step "s2-alter-testdb2-rename-to-db2" { ALTER DATABASE testdb2 RENAME TO db2; } +step "s2-alter-testdb2-set-lc_monetary" { ALTER DATABASE testdb2 SET lc_monetary TO 'C'; } + +step "s2-drop-db1" { DROP DATABASE db1; } + +step "s2-drop-db2" { DROP DATABASE db2; } + +// Given that we cannot execute CREATE / DROP DATABASE commands in a transaction block, we instead acquire the +// underlying advisory lock in some of below tests. 
+ +// e.g., CREATE DATABASE vs CREATE DATABASE +permutation "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock" "s2-acquire-citus-adv-oclass-lock" "s1-commit" "s2-commit" + +// e.g., DROP DATABASE vs DROP DATABASE +// dropping the same database +permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s1-commit" "s2-commit" "s1-drop-testdb1" +// dropping a different database +permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" "s1-commit" "s2-commit" "s1-drop-testdb1" "s2-drop-testdb2" + +// CREATE DATABASE vs DROP DATABASE +permutation "s2-create-testdb2" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" "s1-commit" "s2-commit" "s2-drop-testdb2" + +// CREATE DATABASE vs ALTER DATABASE SET +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-db1" "s2-rollback" "s2-drop-testdb2" "s1-drop-db1" + +// GRANT .. ON DATABASE .. TO ... vs ALTER DATABASE SET +// on the same database +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-user-dbuser" "s1-grant-on-testdb2-to-dbuser" "s2-rollback" "s2-drop-testdb2" "s1-drop-user-dbuser" +// on a different database +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-testdb1" "s1-create-user-dbuser" "s1-grant-on-testdb1-to-dbuser" "s2-rollback" "s2-drop-testdb2" "s1-drop-testdb1" "s1-drop-user-dbuser" + +// ALTER DATABASE .. RENAME TO .. vs ALTER DATABASE .. RENAME TO .. 
+// try to rename different databases to the same name +permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb2-rename-to-db1" "s1-commit" "s2-rollback" "s1-drop-db1" "s2-drop-testdb2" +permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb2-rename-to-db1" "s1-rollback" "s2-commit" "s1-drop-testdb1" "s2-drop-db1" +// try to rename same database +permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb1-rename-to-db1" "s1-commit" "s2-rollback" "s1-drop-db1" +permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb1-rename-to-db1" "s1-rollback" "s2-commit" "s2-drop-db1" + +// CREATE DATABASE vs ALTER DATABASE .. RENAME TO .. +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-create-db1" "s2-rollback" "s2-drop-testdb2" "s1-drop-db1" +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-create-db1" "s2-commit" "s2-drop-db1" +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db2" "s1-create-db1" "s2-commit" "s2-drop-db2" "s1-drop-db1" + +// DROP DATABASE vs ALTER DATABASE .. RENAME TO .. 
+// try to rename the same database +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-drop-testdb2" "s2-rollback" +// try to rename a different database +permutation "s2-create-testdb2" "s1-create-db1" "s2-begin" "s2-alter-testdb2-rename-to-db2" "s1-drop-db1" "s2-commit" "s2-drop-db2" diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index aeb469c1e..26e61c7b8 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -242,18 +242,18 @@ drop database "mydatabase#1'2"; SET citus.enable_create_database_propagation TO ON; --- show that dropping the database from workers is not allowed when citus.enable_create_database_propagation is on +-- show that dropping the database from workers is allowed when citus.enable_create_database_propagation is on DROP DATABASE db_needs_escape; -- and the same applies to create database too create database error_test; +drop database error_test; \c - - - :master_port SET citus.enable_create_database_propagation TO ON; DROP DATABASE test_node_activation; -DROP DATABASE db_needs_escape; DROP USER "role-needs\!escape"; -- drop database with force options test @@ -550,6 +550,204 @@ REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1 DROP ROLE local_role_1; DROP DATABASE local_database_1; +-- test create / drop database commands from workers + +-- remove one of the workers to test node activation too +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + +\c - - - :worker_1_port + +CREATE DATABASE local_worker_db; + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE db_created_from_worker + WITH template=template1 + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + CONNECTION LIMIT = 42 + TABLESPACE = "ts-needs\!escape" + ALLOW_CONNECTIONS = false; + +\c - - - :master_port + +SET 
citus.enable_create_database_propagation TO ON; + +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +\c - - - :worker_1_port + +SET citus.enable_create_database_propagation TO ON; + +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + +DROP DATABASE db_created_from_worker; + +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + +-- drop the local database while the GUC is on +DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + +SET citus.enable_create_database_propagation TO OFF; + +CREATE DATABASE local_worker_db; + +-- drop the local database while the GUC is off +DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE another_db_created_from_worker; + +\c - - - :master_port + +SELECT 1 FROM citus_remove_node('localhost', :master_port); + +\c - - - :worker_1_port + +SET citus.enable_create_database_propagation TO ON; + +-- fails because coordinator is not added into metadata +DROP DATABASE another_db_created_from_worker; + +-- fails because coordinator is not added into metadata +CREATE DATABASE new_db; + +\c - - - :master_port + +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); +RESET client_min_messages; + +SET citus.enable_create_database_propagation TO ON; + +-- dropping a database that was created from a worker via a different node works fine +DROP DATABASE another_db_created_from_worker; +SELECT * FROM public.check_database_on_all_nodes('another_db_created_from_worker') ORDER BY node_type; + +-- Show that we automatically propagate the dependencies (only roles atm) when +-- creating a database from workers too. 
+ +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + +\c - - - :worker_1_port + +set citus.enable_create_role_propagation TO off; +create role non_propagated_role; +set citus.enable_create_role_propagation TO on; + +set citus.enable_create_database_propagation TO on; + +create database test_db OWNER non_propagated_role; + +create role propagated_role; + +\c - - - :master_port + +-- not supported from workers, so need to execute this via coordinator +grant connect on database test_db to propagated_role; + +SET citus.enable_create_database_propagation TO ON; + +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type; + +REVOKE CONNECT ON DATABASE test_db FROM propagated_role; +DROP DATABASE test_db; +DROP ROLE propagated_role, non_propagated_role; + +-- test pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock with null input +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(null, 'regression'); +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), null); + +-- OCLASS_DATABASE +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), NULL); +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'regression'); +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), ''); +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'no_such_db'); + 
+-- invalid OCLASS +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, NULL); +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(-1, 'regression'); + +-- invalid OCLASS +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, NULL); +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(100, 'regression'); + +-- another valid OCLASS, but not implemented yet +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, NULL); +SELECT pg_catalog.citus_internal_acquire_citus_advisory_object_class_lock(10, 'regression'); + +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO ON'); +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); +SELECT pg_sleep(0.1); + +-- only one of them succeeds and we don't run into a distributed deadlock +SELECT COUNT(*) FROM run_command_on_all_nodes('CREATE DATABASE concurrent_create_db') WHERE success; +SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + +SELECT COUNT(*) FROM run_command_on_all_nodes('DROP DATABASE concurrent_create_db') WHERE success; +SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + +-- revert the system wide change that enables citus.enable_create_database_propagation on all nodes +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO OFF'); +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); +SELECT pg_sleep(0.1); + +-- but keep it enabled for coordinator for the rest of the tests +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE distributed_db; + +CREATE USER no_createdb; +SET ROLE no_createdb; +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE no_createdb; +ALTER DATABASE distributed_db RENAME TO rename_test; +DROP DATABASE distributed_db; +ALTER DATABASE 
distributed_db SET TABLESPACE pg_default; +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ALTER DATABASE distributed_db RESET timezone; +GRANT ALL ON DATABASE distributed_db TO postgres; + +RESET ROLE; + +ALTER ROLE no_createdb createdb; + +SET ROLE no_createdb; + +CREATE DATABASE no_createdb; + +ALTER DATABASE distributed_db RENAME TO rename_test; + +RESET ROLE; + +SELECT 1 FROM run_command_on_all_nodes($$GRANT ALL ON TABLESPACE pg_default TO no_createdb$$); +ALTER DATABASE distributed_db OWNER TO no_createdb; + +SET ROLE no_createdb; + +ALTER DATABASE distributed_db SET TABLESPACE pg_default; +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ALTER DATABASE distributed_db RESET timezone; +GRANT ALL ON DATABASE distributed_db TO postgres; +ALTER DATABASE distributed_db RENAME TO rename_test; +DROP DATABASE rename_test; + +RESET ROLE; + +SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FROM no_createdb$$); + +DROP DATABASE no_createdb; +DROP USER no_createdb; + +SET citus.enable_create_database_propagation TO ON; + --clean up resources created by this test -- DROP TABLESPACE is not supported, so we need to drop it manually. diff --git a/src/test/regress/sql/multi_utility_warnings.sql b/src/test/regress/sql/multi_utility_warnings.sql index 491c6557c..49855457c 100644 --- a/src/test/regress/sql/multi_utility_warnings.sql +++ b/src/test/regress/sql/multi_utility_warnings.sql @@ -6,3 +6,4 @@ -- databases. CREATE DATABASE new_database; +DROP DATABASE new_database;