Feature: alter database owner (#4986)

DESCRIPTION: Add support for ALTER DATABASE OWNER

This adds support for changing the database owner. It achieves this by marking the database as a distributed object. By marking the database as a distributed object it will look for its dependencies and order the user creation commands (enterprise only) before the alter of the database owner. This is mostly important when adding new nodes.

By having the database marked as a distributed object, Citus can easily determine which `ALTER DATABASE ... OWNER TO ...` commands to propagate: it resolves the object address of the database and verifies it is a distributed object, in which case changes of ownership should be propagated to all workers.

Given that the ownership of the database might have implications on subsequent commands in a transaction, we force sequential mode for transactions that contain an `ALTER DATABASE ... OWNER TO ...` command. If the transaction has already executed parallel statements, it fails with a meaningful error and a hint on how to retry.

By default the feature is turned off, since roles are not automatically propagated; having it turned on would cause hard-to-understand errors for the user. It can be turned on by setting `citus.enable_alter_database_owner` to `on`.
pull/4963/head
Nils Dijk 2021-05-20 13:27:44 +02:00 committed by GitHub
parent 1ce607dd23
commit a6c2d2a4c4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 861 additions and 20 deletions

View File

@ -0,0 +1,215 @@
/*-------------------------------------------------------------------------
*
* database.c
* Commands to interact with the database object in a distributed
* environment.
*
* Copyright (c) Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/objectaddress.h"
#include "catalog/pg_database.h"
#include "commands/dbcommands.h"
#include "miscadmin.h"
#include "nodes/parsenodes.h"
#include "utils/syscache.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/deparser.h"
#include "distributed/metadata_sync.h"
#include "distributed/metadata_utility.h"
#include "distributed/multi_executor.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/worker_transaction.h"
static void EnsureSequentialModeForDatabaseDDL(void);
static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid);
static Oid get_database_owner(Oid db_oid);
/* controlled via GUC */
bool EnableAlterDatabaseOwner = false;
/*
 * PreprocessAlterDatabaseOwnerStmt is called during the utility hook before the alter
 * command is applied locally on the coordinator. This will verify if the command needs to
 * be propagated to the workers and if so prepares a list of ddl commands to execute.
 */
List *
PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString,
								 ProcessUtilityContext processUtilityContext)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_DATABASE);

	if (!EnableAlterDatabaseOwner)
	{
		/*
		 * Don't propagate if the GUC is turned off. This is the cheapest check,
		 * so perform it before resolving the object address.
		 */
		return NIL;
	}

	/* this is a database address, not a type address (name fixed from copy-paste) */
	ObjectAddress databaseAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
	if (!ShouldPropagateObject(&databaseAddress))
	{
		return NIL;
	}

	EnsureCoordinator();

	QualifyTreeNode((Node *) stmt);
	const char *sql = DeparseTreeNode((Node *) stmt);

	/*
	 * Changing the database owner can affect subsequent commands; make sure all
	 * future commands in this transaction use a single connection per node.
	 */
	EnsureSequentialModeForDatabaseDDL();

	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) sql,
								ENABLE_DDL_PROPAGATION);

	return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
 * PostprocessAlterDatabaseOwnerStmt is called during the utility hook after the alter
 * database command has been applied locally.
 *
 * Its main purpose is to propagate the newly formed dependencies onto the nodes before
 * applying the change of owner of the database. This ensures, for systems that have role
 * management, that the roles will be created before applying the alter owner command.
 */
List *
PostprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_DATABASE);

	if (!EnableAlterDatabaseOwner)
	{
		/*
		 * Don't propagate if the GUC is turned off. This is the cheapest check,
		 * so perform it before resolving the object address.
		 */
		return NIL;
	}

	/* this is a database address, not a type address (name fixed from copy-paste) */
	ObjectAddress databaseAddress = GetObjectAddressFromParseTree((Node *) stmt, false);
	if (!ShouldPropagateObject(&databaseAddress))
	{
		return NIL;
	}

	/* make sure the new owner (and other dependencies) exist on all nodes first */
	EnsureDependenciesExistOnAllNodes(&databaseAddress);
	return NIL;
}
/*
 * AlterDatabaseOwnerObjectAddress returns the ObjectAddress of the database that is the
 * object of the AlterOwnerStmt. Errors if missing_ok is false.
 */
ObjectAddress
AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_DATABASE);

	/* the statement carries the database by name; resolve it to its oid */
	char *databaseName = strVal((Value *) stmt->object);
	Oid databaseOid = get_database_oid(databaseName, missing_ok);

	ObjectAddress databaseAddress = { 0 };
	ObjectAddressSet(databaseAddress, DatabaseRelationId, databaseOid);

	return databaseAddress;
}
/*
* DatabaseOwnerDDLCommands returns a list of sql statements to idempotently apply a
* change of the database owner on the workers so that the database is owned by the same
* user on all nodes in the cluster.
*/
List *
DatabaseOwnerDDLCommands(const ObjectAddress *address)
{
Node *stmt = (Node *) RecreateAlterDatabaseOwnerStmt(address->objectId);
return list_make1(DeparseTreeNode(stmt));
}
/*
 * RecreateAlterDatabaseOwnerStmt creates an AlterOwnerStmt that represents the operation
 * of changing the owner of the database to its current owner.
 */
static AlterOwnerStmt *
RecreateAlterDatabaseOwnerStmt(Oid databaseOid)
{
	/* look up the current owner and express it as a RoleSpec */
	Oid currentOwnerId = get_database_owner(databaseOid);

	RoleSpec *ownerSpec = makeNode(RoleSpec);
	ownerSpec->roletype = ROLESPEC_CSTRING;
	ownerSpec->rolename = GetUserNameFromId(currentOwnerId, false);

	AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt);
	stmt->objectType = OBJECT_DATABASE;
	stmt->object = (Node *) makeString(get_database_name(databaseOid));
	stmt->newowner = ownerSpec;

	return stmt;
}
/*
 * get_database_owner returns the Oid of the role owning the database
 */
static Oid
get_database_owner(Oid db_oid)
{
	/* consult the syscache for the pg_database row of this database */
	HeapTuple databaseTuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(db_oid));
	if (!HeapTupleIsValid(databaseTuple))
	{
		ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE),
						errmsg("database with OID %u does not exist", db_oid)));
	}

	Form_pg_database databaseForm = (Form_pg_database) GETSTRUCT(databaseTuple);
	Oid ownerId = databaseForm->datdba;

	/* release the cache reference before returning */
	ReleaseSysCache(databaseTuple);

	return ownerId;
}
/*
 * EnsureSequentialModeForDatabaseDDL makes sure that the current transaction is already
 * in sequential mode, or can still safely be put in sequential mode, it errors if that is
 * not possible. The error contains information for the user to retry the transaction with
 * sequential mode set from the beginning.
 */
static void
EnsureSequentialModeForDatabaseDDL(void)
{
	if (!IsTransactionBlock())
	{
		/* we do not need to switch to sequential mode if we are not in a transaction */
		return;
	}

	if (ParallelQueryExecutedInTransaction())
	{
		ereport(ERROR, (errmsg("cannot create or modify database because there was a "
							   "parallel operation on a distributed table in the "
							   "transaction"),
						errdetail("When creating or altering a database, Citus needs to "
								  "perform all operations over a single connection per "
								  "node to ensure consistency."),
						errhint("Try re-running the transaction with "
								"\"SET LOCAL citus.multi_shard_modify_mode TO "
								"\'sequential\';\"")));
	}

	/*
	 * Fixed copy-paste from the type-DDL variant: the debug detail talked about
	 * "the type" while this function deals with databases.
	 */
	ereport(DEBUG1, (errmsg("switching to sequential query execution mode"),
					 errdetail("Database is created or altered. To make sure subsequent "
							   "commands see the database correctly we need to make sure "
							   "to use only one connection for all future commands")));
	SetLocalMultiShardModifyModeToSequential();
}

View File

@ -14,6 +14,7 @@
#include "catalog/objectaddress.h"
#include "commands/extension.h"
#include "distributed/commands.h"
#include "distributed/commands/utility_hook.h"
#include "distributed/connection_management.h"
#include "distributed/listutils.h"
#include "distributed/metadata/dependency.h"
@ -191,6 +192,20 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency)
return CreateCollationDDLsIdempotent(dependency->objectId);
}
case OCLASS_DATABASE:
{
List *databaseDDLCommands = NIL;
/* only propagate the ownership of the database when the feature is on */
if (EnableAlterDatabaseOwner)
{
List *ownerDDLCommands = DatabaseOwnerDDLCommands(dependency);
databaseDDLCommands = list_concat(databaseDDLCommands, ownerDDLCommands);
}
return databaseDDLCommands;
}
case OCLASS_PROC:
{
return CreateFunctionDDLCommandsIdempotent(dependency);

View File

@ -240,6 +240,13 @@ static DistributeObjectOps Collation_Rename = {
.postprocess = NULL,
.address = RenameCollationStmtObjectAddress,
};
static DistributeObjectOps Database_AlterOwner = {
.deparse = DeparseAlterDatabaseOwnerStmt,
.qualify = NULL,
.preprocess = PreprocessAlterDatabaseOwnerStmt,
.postprocess = PostprocessAlterDatabaseOwnerStmt,
.address = AlterDatabaseOwnerObjectAddress,
};
static DistributeObjectOps Extension_AlterObjectSchema = {
.deparse = DeparseAlterExtensionSchemaStmt,
.qualify = NULL,
@ -658,6 +665,11 @@ GetDistributeObjectOps(Node *node)
return &Collation_AlterOwner;
}
case OBJECT_DATABASE:
{
return &Database_AlterOwner;
}
case OBJECT_FUNCTION:
{
return &Function_AlterOwner;

View File

@ -0,0 +1,49 @@
/*-------------------------------------------------------------------------
*
* deparse_database_stmts.c
*
* All routines to deparse database statements.
*
* Copyright (c), Citus Data, Inc.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "catalog/namespace.h"
#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt);
/*
 * DeparseAlterDatabaseOwnerStmt turns an AlterOwnerStmt targeting a database back into
 * its SQL text. The returned string is palloc'd in the current memory context.
 */
char *
DeparseAlterDatabaseOwnerStmt(Node *node)
{
	AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node);
	Assert(stmt->objectType == OBJECT_DATABASE);

	StringInfoData buf = { 0 };
	initStringInfo(&buf);

	AppendAlterDatabaseOwnerStmt(&buf, stmt);

	return buf.data;
}
/*
 * AppendAlterDatabaseOwnerStmt appends the SQL text for an
 * ALTER DATABASE ... OWNER TO ... statement to buf.
 */
static void
AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt)
{
	Assert(stmt->objectType == OBJECT_DATABASE);

	const char *databaseName = quote_identifier(strVal((Value *) stmt->object));
	const char *newOwnerName = RoleSpecString(stmt->newowner, true);

	appendStringInfo(buf, "ALTER DATABASE %s OWNER TO %s;", databaseName, newOwnerName);
}

View File

@ -570,6 +570,12 @@ SupportedDependencyByCitus(const ObjectAddress *address)
return true;
}
case OCLASS_DATABASE:
{
/* only to propagate its owner */
return true;
}
case OCLASS_ROLE:
{
/*

View File

@ -694,6 +694,17 @@ RegisterCitusConfigVariables(void)
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citus.enable_alter_database_owner",
gettext_noop("Enables propagating ALTER DATABASE ... OWNER TO ... statements to "
"workers"),
NULL,
&EnableAlterDatabaseOwner,
false,
PGC_USERSET,
GUC_NO_SHOW_ALL,
NULL, NULL, NULL);
DefineCustomBoolVariable(
"citus.enable_binary_protocol",
gettext_noop(

View File

@ -1,5 +1,14 @@
-- citus--10.0-3--10.1-1
-- add the current database to the distributed objects if not already in there.
-- this is to reliably propagate some of the alter database commands that might be
-- supported.
INSERT INTO citus.pg_dist_object SELECT
'pg_catalog.pg_database'::regclass::oid AS oid,
(SELECT oid FROM pg_database WHERE datname = current_database()) as objid,
0 as objsubid
ON CONFLICT DO NOTHING;
#include "../../columnar/sql/columnar--10.0-3--10.1-1.sql"
#include "udfs/create_distributed_table/10.1-1.sql";
#include "udfs/worker_partitioned_relation_total_size/10.1-1.sql"

View File

@ -1,5 +1,10 @@
-- citus--10.1-1--10.0-3
-- remove databases as distributed objects to prevent unknown object types being managed
-- on older versions.
DELETE FROM citus.pg_dist_object
WHERE classid = 'pg_catalog.pg_database'::regclass::oid;
#include "../../../columnar/sql/downgrades/columnar--10.1-1--10.0-3.sql"
DROP FUNCTION pg_catalog.create_distributed_table(regclass, text, citus.distribution_type, text, int);

View File

@ -149,6 +149,13 @@ extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *addre
extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok);
extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString);
/* database.c - forward declarations */
extern List * PreprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * PostprocessAlterDatabaseOwnerStmt(Node *node, const char *queryString);
extern ObjectAddress AlterDatabaseOwnerObjectAddress(Node *node, bool missing_ok);
extern List * DatabaseOwnerDDLCommands(const ObjectAddress *address);
/* extension.c - forward declarations */
extern bool IsDropCitusExtensionStmt(Node *parsetree);
extern bool IsCreateAlterExtensionUpdateCitusStmt(Node *parsetree);

View File

@ -35,6 +35,7 @@ extern bool EnableDependencyCreation;
extern bool EnableCreateTypePropagation;
extern bool EnableAlterRolePropagation;
extern bool EnableAlterRoleSetPropagation;
extern bool EnableAlterDatabaseOwner;
extern int UtilityHookLevel;

View File

@ -127,4 +127,7 @@ extern char * DeparseDropExtensionStmt(Node *stmt);
extern char * DeparseAlterExtensionSchemaStmt(Node *stmt);
extern char * DeparseAlterExtensionStmt(Node *stmt);
/* forward declarations for deparse_database_stmts.c */
extern char * DeparseAlterDatabaseOwnerStmt(Node *node);
#endif /* CITUS_DEPARSER_H */

View File

@ -0,0 +1,301 @@
CREATE SCHEMA alter_database_owner;
SET search_path TO alter_database_owner, public;
CREATE USER database_owner_1;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
CREATE USER database_owner_2;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
SELECT run_command_on_workers('CREATE USER database_owner_1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(2 rows)
SELECT run_command_on_workers('CREATE USER database_owner_2');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"CREATE ROLE")
(localhost,57638,t,"CREATE ROLE")
(2 rows)
-- make sure the propagation of ALTER DATABASE ... OWNER TO ... is on
SET citus.enable_alter_database_owner TO on;
-- list the owners of the current database on all nodes
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,postgres)
(localhost,57638,t,postgres)
(2 rows)
-- remove a node to verify addition later
SELECT master_remove_node('localhost', :worker_2_port);
master_remove_node
---------------------------------------------------------------------
(1 row)
-- verify we can change the owner of a database
ALTER DATABASE regression OWNER TO database_owner_1;
-- list the owner of the current database on the coordinator
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
rolname
---------------------------------------------------------------------
database_owner_1
(1 row)
-- list the owners of the current database on all nodes
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,database_owner_1)
(1 row)
-- turn off propagation to verify it does _not_ propagate to new nodes when turned off
SET citus.enable_alter_database_owner TO off;
-- add back second node to verify the owner of the database was set accordingly
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- list the owners of the current database on all nodes, should reflect on newly added node
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,database_owner_1)
(localhost,57638,t,postgres)
(2 rows)
-- turn on propagation to verify it does propagate to new nodes when enabled
SET citus.enable_alter_database_owner TO on;
SELECT master_remove_node('localhost', :worker_2_port); -- remove so we can re add with propagation on
master_remove_node
---------------------------------------------------------------------
(1 row)
-- add back second node to verify the owner of the database was set accordingly
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
?column?
---------------------------------------------------------------------
1
(1 row)
-- list the owners of the current database on all nodes, should reflect on newly added node
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,database_owner_1)
(localhost,57638,t,database_owner_1)
(2 rows)
-- test changing the owner in a transaction and rollback to cancel
BEGIN;
ALTER DATABASE regression OWNER TO database_owner_2;
ROLLBACK;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
rolname
---------------------------------------------------------------------
database_owner_1
(1 row)
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,database_owner_1)
(localhost,57638,t,database_owner_1)
(2 rows)
CREATE TABLE t (a int PRIMARY KEY);
SELECT create_distributed_table('t', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- test changing the owner in a xact that already had parallel execution
BEGIN;
SELECT count(*) FROM t; -- parallel execution;
count
---------------------------------------------------------------------
0
(1 row)
ALTER DATABASE regression OWNER TO database_owner_2; -- should ERROR
ERROR: cannot create or modify database because there was a parallel operation on a distributed table in the transaction
DETAIL: When creating or altering a database, Citus needs to perform all operations over a single connection per node to ensure consistency.
HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';"
ROLLBACK;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
rolname
---------------------------------------------------------------------
database_owner_1
(1 row)
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,database_owner_1)
(localhost,57638,t,database_owner_1)
(2 rows)
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT count(*) FROM t; -- parallel execution;
count
---------------------------------------------------------------------
0
(1 row)
ALTER DATABASE regression OWNER TO database_owner_2;
COMMIT;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
rolname
---------------------------------------------------------------------
database_owner_2
(1 row)
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,database_owner_2)
(localhost,57638,t,database_owner_2)
(2 rows)
-- turn propagation off and verify it does not propagate interactively when turned off
SET citus.enable_alter_database_owner TO off;
ALTER DATABASE regression OWNER TO database_owner_1;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
rolname
---------------------------------------------------------------------
database_owner_1
(1 row)
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,database_owner_2)
(localhost,57638,t,database_owner_2)
(2 rows)
-- reset state of cluster
SET citus.enable_alter_database_owner TO on;
ALTER DATABASE regression OWNER TO current_user;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
rolname
---------------------------------------------------------------------
postgres
(1 row)
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,postgres)
(localhost,57638,t,postgres)
(2 rows)
DROP USER database_owner_1;
DROP USER database_owner_2;
SELECT run_command_on_workers('DROP USER database_owner_1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(2 rows)
SELECT run_command_on_workers('DROP USER database_owner_2');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"DROP ROLE")
(localhost,57638,t,"DROP ROLE")
(2 rows)
SET client_min_messages TO warning;
DROP SCHEMA alter_database_owner CASCADE;

View File

@ -24,6 +24,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -91,6 +92,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -143,6 +145,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -216,6 +219,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -268,6 +272,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -341,6 +346,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -393,6 +399,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -461,6 +468,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{myschema},{})
(schema,{public},{})
@ -514,6 +522,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -588,6 +597,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{myschema},{})
(schema,{public},{})
@ -641,6 +651,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -715,6 +726,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{myschema},{})
(schema,{public},{})
@ -768,6 +780,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -861,6 +874,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{myschema},{})
(schema,{public},{})
@ -914,6 +928,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1000,6 +1015,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{myschema},{})
(schema,{public},{})
@ -1053,6 +1069,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1147,6 +1164,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{myschema},{})
(schema,{myschema2},{})
@ -1201,6 +1219,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1261,6 +1280,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
(type,{public.tt1},{})
@ -1314,6 +1334,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1373,6 +1394,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
(type,{public.tt1},{})
@ -1426,6 +1448,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1503,6 +1526,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{myschema},{})
(schema,{public},{})
@ -1557,6 +1581,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1633,6 +1658,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(function,"{public,add}","{integer,integer}")
(role,{postgres},{})
(schema,{public},{})
@ -1686,6 +1712,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1767,6 +1794,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(function,"{public,add}","{integer,integer}")
(role,{postgres},{})
(schema,{public},{})
@ -1820,6 +1848,7 @@ step s1-print-distributed-objects:
1
pg_identify_object_as_address
(database,{regression},{})
(role,{postgres},{})
(schema,{public},{})
count
@ -1902,6 +1931,7 @@ step s2-print-distributed-objects:
pg_identify_object_as_address
(database,{regression},{})
(function,"{myschema,add}","{integer,integer}")
(role,{postgres},{})
(schema,{myschema},{})

View File

@ -26,7 +26,7 @@ step s1-print:
count
3
4
extname extversion nspname
seg 1.1 public
@ -73,7 +73,7 @@ step s1-print:
count
3
4
extname extversion nspname
seg 1.2 public
@ -126,7 +126,7 @@ step s1-print:
count
2
3
extname extversion nspname
run_command_on_workers
@ -168,7 +168,7 @@ step s1-print:
count
4
5
extname extversion nspname
seg 1.3 schema1
@ -215,7 +215,7 @@ step s1-print:
count
3
4
extname extversion nspname
run_command_on_workers
@ -270,7 +270,7 @@ step s1-print:
count
6
7
extname extversion nspname
seg 1.3 schema3
@ -322,7 +322,7 @@ step s1-print:
count
6
7
extname extversion nspname
seg 1.3 schema1
@ -379,7 +379,7 @@ step s1-print:
count
5
6
extname extversion nspname
seg 1.1 public
@ -444,7 +444,7 @@ step s1-print:
count
6
7
extname extversion nspname
seg 1.2 public
@ -497,7 +497,7 @@ step s1-print:
count
5
6
extname extversion nspname
run_command_on_workers
@ -538,7 +538,7 @@ step s1-print:
count
5
6
extname extversion nspname
seg 1.3 schema1
@ -597,7 +597,7 @@ step s1-print:
count
6
7
extname extversion nspname
seg 1.3 schema2
@ -648,7 +648,7 @@ step s1-print:
count
5
6
extname extversion nspname
seg 1.1 public
@ -709,7 +709,7 @@ step s1-print:
count
5
6
extname extversion nspname
run_command_on_workers

View File

@ -39,11 +39,12 @@ drop cascades to table upgrade_basic.t_append
SELECT i.* FROM citus.pg_dist_object, pg_identify_object_as_address(classid, objid, objsubid) i ORDER BY 1, 2, 3;
type | object_names | object_args
---------------------------------------------------------------------
database | {postgres} | {}
extension | {isn} | {}
role | {postgres} | {}
schema | {fooschema} | {}
schema | {new_schema} | {}
schema | {public} | {}
type | {fooschema.footype} | {}
(6 rows)
(7 rows)

View File

@ -25,6 +25,7 @@ test: alter_role_propagation
test: propagate_extension_commands
test: escape_extension_name
test: ref_citus_local_fkeys
test: alter_database_owner
test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views

View File

@ -42,6 +42,6 @@ step "s2-commit"
COMMIT;
}
# make sure that we allow creating same-named temporary columnar tables in different sessions
# also make sure that they don't block each other
// make sure that we allow creating same-named temporary columnar tables in different sessions
// also make sure that they don't block each other
permutation "s1-begin" "s2-begin" "s1-create-temp" "s1-insert" "s2-create-temp" "s2-insert" "s1-commit" "s2-commit"

View File

@ -57,11 +57,11 @@ step "s2-commit"
COMMIT;
}
# writes shouldn't block writes or reads
// writes shouldn't block writes or reads
permutation "s1-begin" "s2-begin" "s1-insert" "s2-insert" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select"
# copy vs insert
// copy vs insert
permutation "s1-begin" "s2-begin" "s1-copy" "s2-insert" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select"
# insert vs copy
// insert vs copy
permutation "s1-begin" "s2-begin" "s2-insert" "s1-copy" "s1-select" "s2-select" "s1-commit" "s2-commit" "s1-select"

View File

@ -0,0 +1,175 @@
CREATE SCHEMA alter_database_owner;
SET search_path TO alter_database_owner, public;
CREATE USER database_owner_1;
CREATE USER database_owner_2;
SELECT run_command_on_workers('CREATE USER database_owner_1');
SELECT run_command_on_workers('CREATE USER database_owner_2');
-- make sure the propagation of ALTER DATABASE ... OWNER TO ... is on
SET citus.enable_alter_database_owner TO on;
-- list the owners of the current database on all nodes
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
-- remove a node to verify addition later
SELECT master_remove_node('localhost', :worker_2_port);
-- verify we can change the owner of a database
ALTER DATABASE regression OWNER TO database_owner_1;
-- list the owner of the current database on the coordinator
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
-- list the owners of the current database on all nodes
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
-- turn off propagation to verify it does _not_ propagate to new nodes when turned off
SET citus.enable_alter_database_owner TO off;
-- add back second node to verify the owner of the database was set accordingly
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- list the owners of the current database on all nodes, should reflect on newly added node
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
-- turn on propagation to verify it does propagate to new nodes when enabled
SET citus.enable_alter_database_owner TO on;
SELECT master_remove_node('localhost', :worker_2_port); -- remove so we can re add with propagation on
-- add back second node to verify the owner of the database was set accordingly
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
-- list the owners of the current database on all nodes, should reflect on newly added node
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
-- test changing the owner in a transaction and rollback to cancel
BEGIN;
ALTER DATABASE regression OWNER TO database_owner_2;
ROLLBACK;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
CREATE TABLE t (a int PRIMARY KEY);
SELECT create_distributed_table('t', 'a');
-- test changing the owner in a xact that already had parallel execution
BEGIN;
SELECT count(*) FROM t; -- parallel execution;
ALTER DATABASE regression OWNER TO database_owner_2; -- should ERROR
ROLLBACK;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
BEGIN;
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
SELECT count(*) FROM t; -- parallel execution;
ALTER DATABASE regression OWNER TO database_owner_2;
COMMIT;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
-- turn propagation off and verify it does not propagate interactively when turned off
SET citus.enable_alter_database_owner TO off;
ALTER DATABASE regression OWNER TO database_owner_1;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
-- reset state of cluster
SET citus.enable_alter_database_owner TO on;
ALTER DATABASE regression OWNER TO current_user;
-- list the owners of the current database on all nodes
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
SELECT run_command_on_workers($$
SELECT u.rolname
FROM pg_database d
JOIN pg_roles u
ON (d.datdba = u.oid)
WHERE d.datname = current_database();
$$);
DROP USER database_owner_1;
DROP USER database_owner_2;
SELECT run_command_on_workers('DROP USER database_owner_1');
SELECT run_command_on_workers('DROP USER database_owner_2');
SET client_min_messages TO warning;
DROP SCHEMA alter_database_owner CASCADE;