From a6c2d2a4c433373670e598750aeaedf6da5e6461 Mon Sep 17 00:00:00 2001 From: Nils Dijk Date: Thu, 20 May 2021 13:27:44 +0200 Subject: [PATCH] Feature: alter database owner (#4986) DESCRIPTION: Add support for ALTER DATABASE OWNER This adds support for changing the database owner. It achieves this by marking the database as a distributed object. By marking the database as a distributed object it will look for its dependencies and order the user creation commands (enterprise only) before the alter of the database owner. This is mostly important when adding new nodes. By having the database marked as a distributed object it can easily understand for which `ALTER DATABASE ... OWNER TO ...` commands to propagate by resolving the object address of the database and verifying it is a distributed object, and hence should propagate changes of ownership to all workers. Given the ownership of the database might have implications on subsequent commands in transactions, we force sequential mode for transactions that have an `ALTER DATABASE ... OWNER TO ...` command in them. This will fail the transaction with meaningful help when the transaction has already executed parallel statements. By default the feature is turned off since roles are not automatically propagated; having it turned on would cause hard-to-understand errors for the user. It can be turned on by the user by setting `citus.enable_alter_database_owner`. 
--- src/backend/distributed/commands/database.c | 215 +++++++++++++ .../distributed/commands/dependencies.c | 15 + .../commands/distribute_object_ops.c | 12 + .../deparser/deparse_database_stmts.c | 49 +++ src/backend/distributed/metadata/dependency.c | 6 + src/backend/distributed/shared_library_init.c | 11 + .../distributed/sql/citus--10.0-3--10.1-1.sql | 9 + .../sql/downgrades/citus--10.1-1--10.0-3.sql | 5 + src/include/distributed/commands.h | 7 + .../distributed/commands/utility_hook.h | 1 + src/include/distributed/deparser.h | 3 + .../regress/expected/alter_database_owner.out | 301 ++++++++++++++++++ ...lation_ensure_dependency_activate_node.out | 30 ++ .../expected/isolation_extension_commands.out | 28 +- .../upgrade_pg_dist_object_test_after.out | 3 +- src/test/regress/multi_schedule | 1 + .../regress/spec/columnar_temp_tables.spec | 4 +- .../spec/columnar_write_concurrency.spec | 6 +- src/test/regress/sql/alter_database_owner.sql | 175 ++++++++++ 19 files changed, 861 insertions(+), 20 deletions(-) create mode 100644 src/backend/distributed/commands/database.c create mode 100644 src/backend/distributed/deparser/deparse_database_stmts.c create mode 100644 src/test/regress/expected/alter_database_owner.out create mode 100644 src/test/regress/sql/alter_database_owner.sql diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c new file mode 100644 index 000000000..b4fb15110 --- /dev/null +++ b/src/backend/distributed/commands/database.c @@ -0,0 +1,215 @@ +/*------------------------------------------------------------------------- + * + * database.c + * Commands to interact with the database object in a distributed + * environment. + * + * Copyright (c) Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/htup_details.h" +#include "access/xact.h" +#include "catalog/objectaddress.h" +#include "catalog/pg_database.h" +#include "commands/dbcommands.h" +#include "miscadmin.h" +#include "nodes/parsenodes.h" +#include "utils/syscache.h" + +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/deparser.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" +#include "distributed/multi_executor.h" +#include "distributed/relation_access_tracking.h" +#include "distributed/worker_transaction.h" + +static void EnsureSequentialModeForDatabaseDDL(void); +static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid); +static Oid get_database_owner(Oid db_oid); + +/* controlled via GUC */ +bool EnableAlterDatabaseOwner = false; + + +/* + * PreprocessAlterDatabaseOwnerStmt is called during the utility hook before the alter + * command is applied locally on the coordinator. This will verify if the command needs to + * be propagated to the workers and if so prepares a list of ddl commands to execute. 
+ */ +List * +PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_DATABASE); + + ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&typeAddress)) + { + return NIL; + } + + if (!EnableAlterDatabaseOwner) + { + /* don't propagate if GUC is turned off */ + return NIL; + } + + EnsureCoordinator(); + + QualifyTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + EnsureSequentialModeForDatabaseDDL(); + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PostprocessAlterDatabaseOwnerStmt is called during the utility hook after the alter + * database command has been applied locally. + * + * Its main purpose is to propagate the newly formed dependencies onto the nodes before + * applying the change of owner of the databse. This ensures, for systems that have role + * management, that the roles will be created before applying the alter owner command. + */ +List * +PostprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_DATABASE); + + ObjectAddress typeAddress = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&typeAddress)) + { + return NIL; + } + + if (!EnableAlterDatabaseOwner) + { + /* don't propagate if GUC is turned off */ + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&typeAddress); + return NIL; +} + + +/* + * AlterDatabaseOwnerObjectAddress returns the ObjectAddress of the database that is the + * object of the AlterOwnerStmt. Errors if missing_ok is false. 
+ */ +ObjectAddress +AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_DATABASE); + + Oid databaseOid = get_database_oid(strVal((Value *) stmt->object), missing_ok); + ObjectAddress address = { 0 }; + ObjectAddressSet(address, DatabaseRelationId, databaseOid); + + return address; +} + + +/* + * DatabaseOwnerDDLCommands returns a list of sql statements to idempotently apply a + * change of the database owner on the workers so that the database is owned by the same + * user on all nodes in the cluster. + */ +List * +DatabaseOwnerDDLCommands(const ObjectAddress *address) +{ + Node *stmt = (Node *) RecreateAlterDatabaseOwnerStmt(address->objectId); + return list_make1(DeparseTreeNode(stmt)); +} + + +/* + * RecreateAlterDatabaseOwnerStmt creates an AlterOwnerStmt that represents the operation + * of changing the owner of the database to its current owner. + */ +static AlterOwnerStmt * +RecreateAlterDatabaseOwnerStmt(Oid databaseOid) +{ + AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt); + + stmt->objectType = OBJECT_DATABASE; + stmt->object = (Node *) makeString(get_database_name(databaseOid)); + + Oid ownerOid = get_database_owner(databaseOid); + stmt->newowner = makeNode(RoleSpec); + stmt->newowner->roletype = ROLESPEC_CSTRING; + stmt->newowner->rolename = GetUserNameFromId(ownerOid, false); + + return stmt; +} + + +/* + * get_database_owner returns the Oid of the role owning the database + */ +static Oid +get_database_owner(Oid db_oid) +{ + HeapTuple tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(db_oid)); + if (!HeapTupleIsValid(tuple)) + { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE), + errmsg("database with OID %u does not exist", db_oid))); + } + + Oid dba = ((Form_pg_database) GETSTRUCT(tuple))->datdba; + + ReleaseSysCache(tuple); + + return dba; +} + + +/* + * EnsureSequentialModeForDatabaseDDL makes sure that the current transaction is 
already + * in sequential mode, or can still safely be put in sequential mode, it errors if that is + * not possible. The error contains information for the user to retry the transaction with + * sequential mode set from the beginning. + */ +static void +EnsureSequentialModeForDatabaseDDL(void) +{ + if (!IsTransactionBlock()) + { + /* we do not need to switch to sequential mode if we are not in a transaction */ + return; + } + + if (ParallelQueryExecutedInTransaction()) + { + ereport(ERROR, (errmsg("cannot create or modify database because there was a " + "parallel operation on a distributed table in the " + "transaction"), + errdetail("When creating or altering a database, Citus needs to " + "perform all operations over a single connection per " + "node to ensure consistency."), + errhint("Try re-running the transaction with " + "\"SET LOCAL citus.multi_shard_modify_mode TO " + "\'sequential\';\""))); + } + + ereport(DEBUG1, (errmsg("switching to sequential query execution mode"), + errdetail("Database is created or altered. 
To make sure subsequent " + "commands see the type correctly we need to make sure to " + "use only one connection for all future commands"))); + SetLocalMultiShardModifyModeToSequential(); +} diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index c885d4429..60cb56492 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -14,6 +14,7 @@ #include "catalog/objectaddress.h" #include "commands/extension.h" #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" #include "distributed/listutils.h" #include "distributed/metadata/dependency.h" @@ -191,6 +192,20 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return CreateCollationDDLsIdempotent(dependency->objectId); } + case OCLASS_DATABASE: + { + List *databaseDDLCommands = NIL; + + /* only propagate the ownership of the database when the feature is on */ + if (EnableAlterDatabaseOwner) + { + List *ownerDDLCommands = DatabaseOwnerDDLCommands(dependency); + databaseDDLCommands = list_concat(databaseDDLCommands, ownerDDLCommands); + } + + return databaseDDLCommands; + } + case OCLASS_PROC: { return CreateFunctionDDLCommandsIdempotent(dependency); diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 5f4b94961..e88614377 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -240,6 +240,13 @@ static DistributeObjectOps Collation_Rename = { .postprocess = NULL, .address = RenameCollationStmtObjectAddress, }; +static DistributeObjectOps Database_AlterOwner = { + .deparse = DeparseAlterDatabaseOwnerStmt, + .qualify = NULL, + .preprocess = PreprocessAlterDatabaseOwnerStmt, + .postprocess = PostprocessAlterDatabaseOwnerStmt, + .address = 
AlterDatabaseOwnerObjectAddress, +}; static DistributeObjectOps Extension_AlterObjectSchema = { .deparse = DeparseAlterExtensionSchemaStmt, .qualify = NULL, @@ -658,6 +665,11 @@ GetDistributeObjectOps(Node *node) return &Collation_AlterOwner; } + case OBJECT_DATABASE: + { + return &Database_AlterOwner; + } + case OBJECT_FUNCTION: { return &Function_AlterOwner; diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c new file mode 100644 index 000000000..0ebc69238 --- /dev/null +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -0,0 +1,49 @@ +/*------------------------------------------------------------------------- + * + * deparse_database_stmts.c + * + * All routines to deparse database statements. + * + * Copyright (c), Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" + +static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt); + + +char * +DeparseAlterDatabaseOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + Assert(stmt->objectType == OBJECT_DATABASE); + + AppendAlterDatabaseOwnerStmt(&str, stmt); + + return str.data; +} + + +static void +AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt) +{ + Assert(stmt->objectType == OBJECT_DATABASE); + + appendStringInfo(buf, + "ALTER DATABASE %s OWNER TO %s;", + quote_identifier(strVal((Value *) stmt->object)), + RoleSpecString(stmt->newowner, true)); +} diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index f84e1152a..e57dae987 100644 --- 
a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -570,6 +570,12 @@ SupportedDependencyByCitus(const ObjectAddress *address) return true; } + case OCLASS_DATABASE: + { + /* only to propagate its owner */ + return true; + } + case OCLASS_ROLE: { /* diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index f457fd0e2..f30e4e4ed 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -694,6 +694,17 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.enable_alter_database_owner", + gettext_noop("Enables propagating ALTER DATABASE ... OWNER TO ... statements to " + "workers"), + NULL, + &EnableAlterDatabaseOwner, + false, + PGC_USERSET, + GUC_NO_SHOW_ALL, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.enable_binary_protocol", gettext_noop( diff --git a/src/backend/distributed/sql/citus--10.0-3--10.1-1.sql b/src/backend/distributed/sql/citus--10.0-3--10.1-1.sql index f98e005cc..f6b385472 100644 --- a/src/backend/distributed/sql/citus--10.0-3--10.1-1.sql +++ b/src/backend/distributed/sql/citus--10.0-3--10.1-1.sql @@ -1,5 +1,14 @@ -- citus--10.0-3--10.1-1 +-- add the current database to the distributed objects if not already in there. +-- this is to reliably propagate some of the alter database commands that might be +-- supported. 
+INSERT INTO citus.pg_dist_object SELECT + 'pg_catalog.pg_database'::regclass::oid AS oid, + (SELECT oid FROM pg_database WHERE datname = current_database()) as objid, + 0 as objsubid +ON CONFLICT DO NOTHING; + #include "../../columnar/sql/columnar--10.0-3--10.1-1.sql" #include "udfs/create_distributed_table/10.1-1.sql"; #include "udfs/worker_partitioned_relation_total_size/10.1-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--10.1-1--10.0-3.sql b/src/backend/distributed/sql/downgrades/citus--10.1-1--10.0-3.sql index c328eba43..c5a947553 100644 --- a/src/backend/distributed/sql/downgrades/citus--10.1-1--10.0-3.sql +++ b/src/backend/distributed/sql/downgrades/citus--10.1-1--10.0-3.sql @@ -1,5 +1,10 @@ -- citus--10.1-1--10.0-3 +-- remove databases as distributed objects to prevent unknown object types being managed +-- on older versions. +DELETE FROM citus.pg_dist_object + WHERE classid = 'pg_catalog.pg_database'::regclass::oid; + #include "../../../columnar/sql/downgrades/columnar--10.1-1--10.0-3.sql" DROP FUNCTION pg_catalog.create_distributed_table(regclass, text, citus.distribution_type, text, int); diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 8c1282129..fa409cfb8 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -149,6 +149,13 @@ extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *addre extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok); extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString); +/* database.c - forward declarations */ +extern List * PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString); +extern ObjectAddress AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok); +extern List * DatabaseOwnerDDLCommands(const 
ObjectAddress *address); + /* extension.c - forward declarations */ extern bool IsDropCitusExtensionStmt(Node *parsetree); extern bool IsCreateAlterExtensionUpdateCitusStmt(Node *parsetree); diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index ee61ed690..24717986e 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -35,6 +35,7 @@ extern bool EnableDependencyCreation; extern bool EnableCreateTypePropagation; extern bool EnableAlterRolePropagation; extern bool EnableAlterRoleSetPropagation; +extern bool EnableAlterDatabaseOwner; extern int UtilityHookLevel; diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 7e264544c..92ef828fe 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -127,4 +127,7 @@ extern char * DeparseDropExtensionStmt(Node *stmt); extern char * DeparseAlterExtensionSchemaStmt(Node *stmt); extern char * DeparseAlterExtensionStmt(Node *stmt); +/* forward declarations for deparse_database_stmts.c */ +extern char * DeparseAlterDatabaseOwnerStmt(Node *node); + #endif /* CITUS_DEPARSER_H */ diff --git a/src/test/regress/expected/alter_database_owner.out b/src/test/regress/expected/alter_database_owner.out new file mode 100644 index 000000000..2e5e54aca --- /dev/null +++ b/src/test/regress/expected/alter_database_owner.out @@ -0,0 +1,301 @@ +CREATE SCHEMA alter_database_owner; +SET search_path TO alter_database_owner, public; +CREATE USER database_owner_1; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. +CREATE USER database_owner_2; +NOTICE: not propagating CREATE ROLE/USER commands to worker nodes +HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
+SELECT run_command_on_workers('CREATE USER database_owner_1'); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE ROLE") + (localhost,57638,t,"CREATE ROLE") +(2 rows) + +SELECT run_command_on_workers('CREATE USER database_owner_2'); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE ROLE") + (localhost,57638,t,"CREATE ROLE") +(2 rows) + +-- make sure the propagation of ALTER DATABASE ... OWNER TO ... is on +SET citus.enable_alter_database_owner TO on; +-- list the owners of the current database on all nodes +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,postgres) + (localhost,57638,t,postgres) +(2 rows) + +-- remove a node to verify addition later +SELECT master_remove_node('localhost', :worker_2_port); + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +-- verify we can change the owner of a database +ALTER DATABASE regression OWNER TO database_owner_1; +-- list the owner of the current database on the coordinator +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + rolname +--------------------------------------------------------------------- + database_owner_1 +(1 row) + +-- list the owners of the current database on all nodes +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_1) +(1 row) + +-- turn off propagation to verify it 
does _not_ propagate to new nodes when turned off +SET citus.enable_alter_database_owner TO off; +-- add back second node to verify the owner of the database was set accordingly +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- list the owners of the current database on all nodes, should reflect on newly added node +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_1) + (localhost,57638,t,postgres) +(2 rows) + +-- turn on propagation to verify it does propagate to new nodes when enabled +SET citus.enable_alter_database_owner TO on; +SELECT master_remove_node('localhost', :worker_2_port); -- remove so we can re add with propagation on + master_remove_node +--------------------------------------------------------------------- + +(1 row) + +-- add back second node to verify the owner of the database was set accordingly +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- list the owners of the current database on all nodes, should reflect on newly added node +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_1) + (localhost,57638,t,database_owner_1) +(2 rows) + +-- test changing the owner in a transaction and rollback to cancel +BEGIN; +ALTER DATABASE regression OWNER TO database_owner_2; +ROLLBACK; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + rolname +--------------------------------------------------------------------- + database_owner_1 +(1 row) + +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_1) + (localhost,57638,t,database_owner_1) +(2 rows) + +CREATE TABLE t (a int PRIMARY KEY); +SELECT create_distributed_table('t', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- test changing the owner in a xact that already had parallel execution +BEGIN; +SELECT count(*) FROM t; -- parallel execution; + count +--------------------------------------------------------------------- + 0 +(1 row) + +ALTER DATABASE regression OWNER TO database_owner_2; -- should ERROR +ERROR: cannot create or modify database because there was a parallel operation on a distributed table in the transaction +DETAIL: When creating or altering a database, Citus needs to perform all operations 
over a single connection per node to ensure consistency. +HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ROLLBACK; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + rolname +--------------------------------------------------------------------- + database_owner_1 +(1 row) + +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_1) + (localhost,57638,t,database_owner_1) +(2 rows) + +BEGIN; +SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; +SELECT count(*) FROM t; -- parallel execution; + count +--------------------------------------------------------------------- + 0 +(1 row) + +ALTER DATABASE regression OWNER TO database_owner_2; +COMMIT; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + rolname +--------------------------------------------------------------------- + database_owner_2 +(1 row) + +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_2) + (localhost,57638,t,database_owner_2) +(2 rows) + +-- turn propagation off and verify it does not propagate interactively when turned off +SET citus.enable_alter_database_owner TO off; +ALTER DATABASE regression OWNER TO database_owner_1; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d 
+ JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + rolname +--------------------------------------------------------------------- + database_owner_1 +(1 row) + +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,database_owner_2) + (localhost,57638,t,database_owner_2) +(2 rows) + +-- reset state of cluster +SET citus.enable_alter_database_owner TO on; +ALTER DATABASE regression OWNER TO current_user; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + rolname +--------------------------------------------------------------------- + postgres +(1 row) + +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,postgres) + (localhost,57638,t,postgres) +(2 rows) + +DROP USER database_owner_1; +DROP USER database_owner_2; +SELECT run_command_on_workers('DROP USER database_owner_1'); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"DROP ROLE") + (localhost,57638,t,"DROP ROLE") +(2 rows) + +SELECT run_command_on_workers('DROP USER database_owner_2'); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"DROP ROLE") + (localhost,57638,t,"DROP ROLE") +(2 rows) + +SET client_min_messages TO warning; +DROP SCHEMA alter_database_owner CASCADE; diff --git a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out 
b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out index 4bd7941eb..5233b698a 100644 --- a/src/test/regress/expected/isolation_ensure_dependency_activate_node.out +++ b/src/test/regress/expected/isolation_ensure_dependency_activate_node.out @@ -24,6 +24,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -91,6 +92,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -143,6 +145,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -216,6 +219,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -268,6 +272,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -341,6 +346,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -393,6 +399,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -461,6 +468,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) @@ -514,6 +522,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -588,6 +597,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) @@ -641,6 +651,7 @@ step s1-print-distributed-objects: 1 
pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -715,6 +726,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) @@ -768,6 +780,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -861,6 +874,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) @@ -914,6 +928,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1000,6 +1015,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) @@ -1053,6 +1069,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1147,6 +1164,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{myschema},{}) (schema,{myschema2},{}) @@ -1201,6 +1219,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1261,6 +1280,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) (type,{public.tt1},{}) @@ -1314,6 +1334,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1373,6 +1394,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) 
(type,{public.tt1},{}) @@ -1426,6 +1448,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1503,6 +1526,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{myschema},{}) (schema,{public},{}) @@ -1557,6 +1581,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1633,6 +1658,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (function,"{public,add}","{integer,integer}") (role,{postgres},{}) (schema,{public},{}) @@ -1686,6 +1712,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1767,6 +1794,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (function,"{public,add}","{integer,integer}") (role,{postgres},{}) (schema,{public},{}) @@ -1820,6 +1848,7 @@ step s1-print-distributed-objects: 1 pg_identify_object_as_address +(database,{regression},{}) (role,{postgres},{}) (schema,{public},{}) count @@ -1902,6 +1931,7 @@ step s2-print-distributed-objects: pg_identify_object_as_address +(database,{regression},{}) (function,"{myschema,add}","{integer,integer}") (role,{postgres},{}) (schema,{myschema},{}) diff --git a/src/test/regress/expected/isolation_extension_commands.out b/src/test/regress/expected/isolation_extension_commands.out index e2a5d6090..c41e9e16f 100644 --- a/src/test/regress/expected/isolation_extension_commands.out +++ b/src/test/regress/expected/isolation_extension_commands.out @@ -26,7 +26,7 @@ step s1-print: count -3 +4 extname extversion nspname seg 1.1 public @@ -73,7 +73,7 @@ step s1-print: count -3 +4 extname extversion nspname seg 1.2 public @@ -126,7 +126,7 @@ step s1-print: count -2 +3 
extname extversion nspname run_command_on_workers @@ -168,7 +168,7 @@ step s1-print: count -4 +5 extname extversion nspname seg 1.3 schema1 @@ -215,7 +215,7 @@ step s1-print: count -3 +4 extname extversion nspname run_command_on_workers @@ -270,7 +270,7 @@ step s1-print: count -6 +7 extname extversion nspname seg 1.3 schema3 @@ -322,7 +322,7 @@ step s1-print: count -6 +7 extname extversion nspname seg 1.3 schema1 @@ -379,7 +379,7 @@ step s1-print: count -5 +6 extname extversion nspname seg 1.1 public @@ -444,7 +444,7 @@ step s1-print: count -6 +7 extname extversion nspname seg 1.2 public @@ -497,7 +497,7 @@ step s1-print: count -5 +6 extname extversion nspname run_command_on_workers @@ -538,7 +538,7 @@ step s1-print: count -5 +6 extname extversion nspname seg 1.3 schema1 @@ -597,7 +597,7 @@ step s1-print: count -6 +7 extname extversion nspname seg 1.3 schema2 @@ -648,7 +648,7 @@ step s1-print: count -5 +6 extname extversion nspname seg 1.1 public @@ -709,7 +709,7 @@ step s1-print: count -5 +6 extname extversion nspname run_command_on_workers diff --git a/src/test/regress/expected/upgrade_pg_dist_object_test_after.out b/src/test/regress/expected/upgrade_pg_dist_object_test_after.out index 5a5d8cb63..7437177f5 100644 --- a/src/test/regress/expected/upgrade_pg_dist_object_test_after.out +++ b/src/test/regress/expected/upgrade_pg_dist_object_test_after.out @@ -39,11 +39,12 @@ drop cascades to table upgrade_basic.t_append SELECT i.* FROM citus.pg_dist_object, pg_identify_object_as_address(classid, objid, objsubid) i ORDER BY 1, 2, 3; type | object_names | object_args --------------------------------------------------------------------- + database | {postgres} | {} extension | {isn} | {} role | {postgres} | {} schema | {fooschema} | {} schema | {new_schema} | {} schema | {public} | {} type | {fooschema.footype} | {} - (6 rows) + (7 rows) diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 338b14968..359036af6 100644 --- 
a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -25,6 +25,7 @@ test: alter_role_propagation test: propagate_extension_commands test: escape_extension_name test: ref_citus_local_fkeys +test: alter_database_owner test: multi_test_helpers multi_test_helpers_superuser test: multi_test_catalog_views diff --git a/src/test/regress/spec/columnar_temp_tables.spec b/src/test/regress/spec/columnar_temp_tables.spec index bffce93cb..456f1236e 100644 --- a/src/test/regress/spec/columnar_temp_tables.spec +++ b/src/test/regress/spec/columnar_temp_tables.spec @@ -42,6 +42,6 @@ step "s2-commit" COMMIT; } -# make sure that we allow creating same-named temporary columnar tables in different sessions -# also make sure that they don't block each other +// make sure that we allow creating same-named temporary columnar tables in different sessions +// also make sure that they don't block each other permutation "s1-begin" "s2-begin" "s1-create-temp" "s1-insert" "s2-create-temp" "s2-insert" "s1-commit" "s2-commit" diff --git a/src/test/regress/spec/columnar_write_concurrency.spec b/src/test/regress/spec/columnar_write_concurrency.spec index 4b2525bd5..5fe32b607 100644 --- a/src/test/regress/spec/columnar_write_concurrency.spec +++ b/src/test/regress/spec/columnar_write_concurrency.spec @@ -57,11 +57,11 @@ step "s2-commit" COMMIT; } -# writes shouldn't block writes or reads +// writes shouldn't block writes or reads permutation "s1-begin" "s2-begin" "s1-insert" "s2-insert" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select" -# copy vs insert +// copy vs insert permutation "s1-begin" "s2-begin" "s1-copy" "s2-insert" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select" -# insert vs copy +// insert vs copy permutation "s1-begin" "s2-begin" "s2-insert" "s1-copy" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select" diff --git a/src/test/regress/sql/alter_database_owner.sql b/src/test/regress/sql/alter_database_owner.sql new file mode 100644 index 
000000000..ae8418468 --- /dev/null +++ b/src/test/regress/sql/alter_database_owner.sql @@ -0,0 +1,175 @@ +CREATE SCHEMA alter_database_owner; +SET search_path TO alter_database_owner, public; + +CREATE USER database_owner_1; +CREATE USER database_owner_2; +SELECT run_command_on_workers('CREATE USER database_owner_1'); +SELECT run_command_on_workers('CREATE USER database_owner_2'); + +-- make sure the propagation of ALTER DATABASE ... OWNER TO ... is on +SET citus.enable_alter_database_owner TO on; + +-- list the owners of the current database on all nodes +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +-- remove a node to verify addition later +SELECT master_remove_node('localhost', :worker_2_port); + +-- verify we can change the owner of a database +ALTER DATABASE regression OWNER TO database_owner_1; + +-- list the owner of the current database on the coordinator +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); + +-- list the owners of the current database on all nodes +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +-- turn off propagation to verify it does _not_ propagate to new nodes when turned off +SET citus.enable_alter_database_owner TO off; + +-- add back second node to verify the owner of the database was set accordingly +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + +-- list the owners of the current database on all nodes, should reflect on newly added node +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +-- turn on propagation to verify it does propagate to new nodes when enabled +SET citus.enable_alter_database_owner TO 
on; +SELECT master_remove_node('localhost', :worker_2_port); -- remove so we can re add with propagation on + +-- add back second node to verify the owner of the database was set accordingly +SELECT 1 FROM master_add_node('localhost', :worker_2_port); + +-- list the owners of the current database on all nodes, should reflect on newly added node +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +-- test changing the owner in a transaction and rollback to cancel +BEGIN; +ALTER DATABASE regression OWNER TO database_owner_2; +ROLLBACK; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + + +CREATE TABLE t (a int PRIMARY KEY); +SELECT create_distributed_table('t', 'a'); +-- test changing the owner in a xact that already had parallel execution +BEGIN; +SELECT count(*) FROM t; -- parallel execution; +ALTER DATABASE regression OWNER TO database_owner_2; -- should ERROR +ROLLBACK; + +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +BEGIN; +SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; +SELECT count(*) FROM t; -- parallel execution; +ALTER DATABASE regression OWNER TO database_owner_2; +COMMIT; + +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +SELECT 
run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +-- turn propagation off and verify it does not propagate interactively when turned off +SET citus.enable_alter_database_owner TO off; + +ALTER DATABASE regression OWNER TO database_owner_1; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +-- reset state of cluster +SET citus.enable_alter_database_owner TO on; +ALTER DATABASE regression OWNER TO current_user; +-- list the owners of the current database on all nodes +SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +SELECT run_command_on_workers($$ + SELECT u.rolname + FROM pg_database d + JOIN pg_roles u + ON (d.datdba = u.oid) + WHERE d.datname = current_database(); +$$); + +DROP USER database_owner_1; +DROP USER database_owner_2; +SELECT run_command_on_workers('DROP USER database_owner_1'); +SELECT run_command_on_workers('DROP USER database_owner_2'); +SET client_min_messages TO warning; +DROP SCHEMA alter_database_owner CASCADE;