Merge branch 'main' into comment_propagation

pull/7388/head
Gürkan İndibay 2023-12-28 15:17:46 +03:00 committed by GitHub
commit d8b329767c
16 changed files with 605 additions and 61 deletions

@@ -31,20 +31,90 @@
#include "distributed/worker_manager.h"
#include "distributed/worker_transaction.h"
typedef enum RequiredObjectSet
{
REQUIRE_ONLY_DEPENDENCIES = 1,
REQUIRE_OBJECT_AND_DEPENDENCIES = 2,
} RequiredObjectSet;
static void EnsureDependenciesCanBeDistributed(const ObjectAddress *relationAddress);
static void ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress);
static int ObjectAddressComparator(const void *a, const void *b);
static void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target);
static void EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
RequiredObjectSet requiredObjectSet);
static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency);
static bool ShouldPropagateObject(const ObjectAddress *address);
static char * DropTableIfExistsCommand(Oid relationId);
/*
* EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes
* sure these are available on all nodes. If not available they will be created on the
* nodes via a separate session that will be committed directly so that the objects are
* visible to potentially multiple sessions creating the shards.
* EnsureObjectAndDependenciesExistOnAllNodes is a wrapper around
* EnsureRequiredObjectSetExistOnAllNodes to ensure the "object itself" (together
* with its dependencies) is available on all nodes.
*
* Unlike EnsureDependenciesExistOnAllNodes, we return early if the
* target object is already distributed.
*
* The reason we don't do the same in EnsureDependenciesExistOnAllNodes
* is that it is also used when altering an object, so the target object
* may have just gained a dependency that needs to be propagated now. For example,
* when "GRANT non_dist_role TO dist_role" is executed, we need to propagate
* "non_dist_role" to all nodes before propagating the "GRANT" command itself.
* For this reason, we call EnsureDependenciesExistOnAllNodes for "dist_role"
* and it would automatically discover that "non_dist_role" is a dependency of
* "dist_role" and propagate it beforehand.
*
* However, when we're requested to create the target object itself (and
* implicitly its dependencies), we're sure that we're not altering the target
* object itself, hence we can return early if the target object is already
* distributed. This is the case, for example, when
* "REASSIGN OWNED BY dist_role TO non_dist_role" is executed. In that case,
* "non_dist_role" is not a dependency of "dist_role" but we want to distribute
* "non_dist_role" beforehand and we call this function for "non_dist_role",
* not for "dist_role".
*
* See EnsureRequiredObjectSetExistOnAllNodes to learn more about how this
* function deals with an object created within the same transaction.
*/
void
EnsureObjectAndDependenciesExistOnAllNodes(const ObjectAddress *target)
{
if (IsAnyObjectDistributed(list_make1((ObjectAddress *) target)))
{
return;
}
EnsureRequiredObjectSetExistOnAllNodes(target, REQUIRE_OBJECT_AND_DEPENDENCIES);
}
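
For orientation, here is a minimal sketch of the intended call pattern. The role name is hypothetical; GetNewRoleAddress further down in this diff builds the address the same way for the role in the TO clause of REASSIGN OWNED:

	/* sketch: distribute a role (and its dependencies) up front */
	Oid roleOid = get_role_oid("non_dist_role", false);	/* hypothetical role */
	ObjectAddress roleAddress = { 0 };
	ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid);

	/* no-op if the role is already distributed */
	EnsureObjectAndDependenciesExistOnAllNodes(&roleAddress);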
/*
* EnsureDependenciesExistOnAllNodes is a wrapper around
* EnsureRequiredObjectSetExistOnAllNodes to ensure "all dependencies" of given
* object --but not the object itself-- are available on all nodes.
*
* See EnsureRequiredObjectSetExistOnAllNodes to learn more about how this
* function deals with an object created within the same transaction.
*/
static void
EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
{
EnsureRequiredObjectSetExistOnAllNodes(target, REQUIRE_ONLY_DEPENDENCIES);
}
/*
* EnsureRequiredObjectSetExistOnAllNodes finds all the dependencies that we support and makes
* sure these are available on all nodes if required object set is REQUIRE_ONLY_DEPENDENCIES.
* Otherwise, i.e., if required object set is REQUIRE_OBJECT_AND_DEPENDENCIES, then this
* function creates the object itself on all nodes too. This function ensures that each
* of the dependencies is supported by Citus but doesn't check the same for the target
* object itself (when REQUIRE_OBJECT_AND_DEPENDENCIES is provided) because we assume
* that callers don't call this function for an unsupported object in the first place.
*
* If not available, they will be created on the nodes via a separate session that will be
* committed directly so that the objects are visible to potentially multiple sessions creating
* the shards.
*
* Note: only the actual objects are created via a separate session; the records in
* pg_dist_object are created in this session. As a side effect the objects could be
@@ -55,29 +125,52 @@ static char * DropTableIfExistsCommand(Oid relationId);
* postgres native CREATE IF NOT EXISTS, or citus helper functions.
*/
static void
EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target,
RequiredObjectSet requiredObjectSet)
{
List *dependenciesWithCommands = NIL;
Assert(requiredObjectSet == REQUIRE_ONLY_DEPENDENCIES ||
requiredObjectSet == REQUIRE_OBJECT_AND_DEPENDENCIES);
List *objectsWithCommands = NIL;
List *ddlCommands = NIL;
/*
* If any unsupported dependency or circular dependency exists, Citus cannot
* ensure dependencies will exist on all nodes.
*
* Note that we don't check whether "target" is distributable (in case
* REQUIRE_OBJECT_AND_DEPENDENCIES is provided) because we expect callers
* to not even call this function if Citus doesn't know how to propagate
* "target" object itself.
*/
EnsureDependenciesCanBeDistributed(target);
/* collect all dependencies in creation order and get their ddl commands */
List *dependencies = GetDependenciesForObject(target);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencies)
List *objectsToBeCreated = GetDependenciesForObject(target);
/*
* If requested, append the target object itself so that it is created
* after its dependencies.
*/
if (requiredObjectSet == REQUIRE_OBJECT_AND_DEPENDENCIES)
{
List *dependencyCommands = GetDependencyCreateDDLCommands(dependency);
ObjectAddress *targetCopy = palloc(sizeof(ObjectAddress));
*targetCopy = *target;
objectsToBeCreated = lappend(objectsToBeCreated, targetCopy);
}
ObjectAddress *object = NULL;
foreach_ptr(object, objectsToBeCreated)
{
List *dependencyCommands = GetDependencyCreateDDLCommands(object);
ddlCommands = list_concat(ddlCommands, dependencyCommands);
/* create a new list with dependencies that actually created commands */
/* create a new list with objects that actually created commands */
if (list_length(dependencyCommands) > 0)
{
dependenciesWithCommands = lappend(dependenciesWithCommands, dependency);
objectsWithCommands = lappend(objectsWithCommands, object);
}
}
if (list_length(ddlCommands) <= 0)
@@ -100,26 +193,28 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
List *remoteNodeList = ActivePrimaryRemoteNodeList(RowShareLock);
/*
* Lock dependent objects explicitly to make sure same DDL command won't be sent
* Lock the objects to be created explicitly to make sure the same DDL command won't be sent
* multiple times from parallel sessions.
*
* Sort dependencies that will be created on workers to not to have any deadlock
* Sort the objects that will be created on workers to avoid deadlock
* issues when different sessions are creating different objects.
*/
List *addressSortedDependencies = SortList(dependenciesWithCommands,
List *addressSortedDependencies = SortList(objectsWithCommands,
ObjectAddressComparator);
foreach_ptr(dependency, addressSortedDependencies)
foreach_ptr(object, addressSortedDependencies)
{
LockDatabaseObject(dependency->classId, dependency->objectId,
dependency->objectSubId, ExclusiveLock);
LockDatabaseObject(object->classId, object->objectId,
object->objectSubId, ExclusiveLock);
}
/*
* We need to propagate dependencies via the current user's metadata connection if
* any dependency for the target is created in the current transaction. Our assumption
* is that if we rely on a dependency created in the current transaction, then the
* current user, most probably, has permissions to create the target object as well.
* We need to propagate objects via the current user's metadata connection if
* any of the objects that we're interested in are created in the current transaction.
* Our assumption is that if we rely on an object created in the current transaction,
* then the current user most probably has permissions to create the target object
* as well.
*
* Note that the user may still be unable to create the target due to missing
* permissions on any of its dependencies. But this is ok since it should be rare.
*
@@ -127,7 +222,18 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
* have visibility issues since propagated dependencies would be invisible to
* the separate connection until we locally commit.
*/
if (HasAnyDependencyInPropagatedObjects(target))
List *createdObjectList = GetAllSupportedDependenciesForObject(target);
/* consider target as well if we're requested to create it too */
if (requiredObjectSet == REQUIRE_OBJECT_AND_DEPENDENCIES)
{
ObjectAddress *targetCopy = palloc(sizeof(ObjectAddress));
*targetCopy = *target;
createdObjectList = lappend(createdObjectList, targetCopy);
}
if (HasAnyObjectInPropagatedObjects(createdObjectList))
{
SendCommandListToRemoteNodesWithMetadata(ddlCommands);
}
@@ -150,7 +256,7 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
* that objects have been created on remote nodes before marking them
* distributed, so MarkObjectDistributed wouldn't fail.
*/
foreach_ptr(dependency, dependenciesWithCommands)
foreach_ptr(object, objectsWithCommands)
{
/*
* pg_dist_object entries must be propagated with the super user, since
@@ -160,7 +266,7 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
* Only the dependent objects' metadata should be propagated with the superuser.
* Metadata of the table itself must be propagated with the current user.
*/
MarkObjectDistributedViaSuperUser(dependency);
MarkObjectDistributedViaSuperUser(object);
}
}
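
The deadlock-avoidance sort above relies on a deterministic total order over ObjectAddress. ObjectAddressComparator is declared at the top of this diff but its body is outside the hunk; a comparator along the following lines would do. This is a sketch, not the actual implementation, and it assumes SortList qsorts an array of element pointers (hence the double indirection):

	static int
	ObjectAddressComparatorSketch(const void *a, const void *b)
	{
		ObjectAddress *addressA = *((ObjectAddress **) a);
		ObjectAddress *addressB = *((ObjectAddress **) b);

		/* compare classId, then objectId, then objectSubId for a total order */
		if (addressA->classId != addressB->classId)
		{
			return addressA->classId < addressB->classId ? -1 : 1;
		}
		if (addressA->objectId != addressB->objectId)
		{
			return addressA->objectId < addressB->objectId ? -1 : 1;
		}
		if (addressA->objectSubId != addressB->objectSubId)
		{
			return addressA->objectSubId < addressB->objectSubId ? -1 : 1;
		}
		return 0;
	}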

@@ -275,6 +275,17 @@ static DistributeObjectOps Any_CreateRole = {
.address = CreateRoleStmtObjectAddress,
.markDistributed = true,
};
static DistributeObjectOps Any_ReassignOwned = {
.deparse = DeparseReassignOwnedStmt,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessReassignOwnedStmt,
.operationType = DIST_OPS_ALTER,
.address = NULL,
.markDistributed = false,
};
static DistributeObjectOps Any_DropOwned = {
.deparse = DeparseDropOwnedStmt,
.qualify = NULL,
@@ -1910,6 +1921,11 @@ GetDistributeObjectOps(Node *node)
return &Any_DropOwned;
}
case T_ReassignOwnedStmt:
{
return &Any_ReassignOwned;
}
case T_DropStmt:
{
DropStmt *stmt = castNode(DropStmt, node);

@@ -48,6 +48,9 @@
#include "distributed/version_compat.h"
#include "distributed/worker_transaction.h"
static ObjectAddress * GetNewRoleAddress(ReassignOwnedStmt *stmt);
/*
* PreprocessDropOwnedStmt finds the distributed roles among the ones
* being dropped, unmarks them as distributed, and creates the drop statements
@@ -89,3 +92,81 @@ PreprocessDropOwnedStmt(Node *node, const char *queryString,
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
/*
* PostprocessReassignOwnedStmt takes a Node pointer representing a REASSIGN
* OWNED statement and performs any necessary post-processing after the statement
* has been executed locally.
*
* We filter out local roles in the OWNED BY clause before deparsing the command,
* meaning that we skip reassigning what is owned by local roles. However,
* if the role specified in the TO clause is local, we automatically distribute
* it before deparsing the command.
*/
List *
PostprocessReassignOwnedStmt(Node *node, const char *queryString)
{
ReassignOwnedStmt *stmt = castNode(ReassignOwnedStmt, node);
List *allReassignRoles = stmt->roles;
List *distributedReassignRoles = FilterDistributedRoles(allReassignRoles);
if (list_length(distributedReassignRoles) <= 0)
{
return NIL;
}
if (!ShouldPropagate())
{
return NIL;
}
EnsureCoordinator();
stmt->roles = distributedReassignRoles;
char *sql = DeparseTreeNode((Node *) stmt);
stmt->roles = allReassignRoles;
ObjectAddress *newRoleAddress = GetNewRoleAddress(stmt);
/*
* We temporarily enable create / alter role propagation to properly
* propagate the role specified in the TO clause.
*/
int saveNestLevel = NewGUCNestLevel();
set_config_option("citus.enable_create_role_propagation", "on",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
set_config_option("citus.enable_alter_role_propagation", "on",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
set_config_option("citus.enable_alter_role_set_propagation", "on",
(superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION,
GUC_ACTION_LOCAL, true, 0, false);
EnsureObjectAndDependenciesExistOnAllNodes(newRoleAddress);
/* roll back the GUC changes made above */
AtEOXact_GUC(true, saveNestLevel);
List *commands = list_make3(DISABLE_DDL_PROPAGATION,
sql,
ENABLE_DDL_PROPAGATION);
return NodeDDLTaskList(NON_COORDINATOR_NODES, commands);
}
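
For reference, assuming DISABLE_DDL_PROPAGATION and ENABLE_DDL_PROPAGATION are Citus' usual wrappers around citus.enable_ddl_propagation, the task list built above reaches each worker roughly as the following three statements; the middle one matches the NOTICE lines in the regression output later in this diff:

	/*
	 * Illustrative expansion of the task list on a worker node:
	 *
	 *   SET citus.enable_ddl_propagation TO 'off';
	 *   REASSIGN OWNED BY dist_role1, dist_role2 TO target_role;
	 *   SET citus.enable_ddl_propagation TO 'on';
	 *
	 * Turning propagation off makes the worker apply the reassignment
	 * locally instead of trying to propagate it further.
	 */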
/*
* GetNewRoleAddress returns the ObjectAddress of the role given in the TO clause
*/
static ObjectAddress *
GetNewRoleAddress(ReassignOwnedStmt *stmt)
{
Oid roleOid = get_role_oid(stmt->newrole->rolename, false);
ObjectAddress *address = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*address, AuthIdRelationId, roleOid);
return address;
}

@@ -71,7 +71,7 @@ AppendRoleList(StringInfo buf, List *roleList)
{
Node *roleNode = (Node *) lfirst(cell);
Assert(IsA(roleNode, RoleSpec) || IsA(roleNode, AccessPriv));
char const *rolename = NULL;
const char *rolename = NULL;
if (IsA(roleNode, RoleSpec))
{
rolename = RoleSpecString((RoleSpec *) roleNode, true);
@@ -83,3 +83,27 @@ AppendRoleList(StringInfo buf, List *roleList)
}
}
}
/*
 * AppendReassignOwnedStmt builds the REASSIGN OWNED BY ... TO ... statement
 * and appends it to the given buffer.
 */
static void
AppendReassignOwnedStmt(StringInfo buf, ReassignOwnedStmt *stmt)
{
appendStringInfoString(buf, "REASSIGN OWNED BY ");
AppendRoleList(buf, stmt->roles);
const char *newRoleName = RoleSpecString(stmt->newrole, true);
appendStringInfo(buf, " TO %s", newRoleName);
}
/*
 * DeparseReassignOwnedStmt builds and returns a string representing the
 * given ReassignOwnedStmt.
 */
char *
DeparseReassignOwnedStmt(Node *node)
{
ReassignOwnedStmt *stmt = castNode(ReassignOwnedStmt, node);
StringInfoData buf = { 0 };
initStringInfo(&buf);
AppendReassignOwnedStmt(&buf, stmt);
return buf.data;
}
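
As a quick illustration of the deparse round trip (a sketch; parsetree stands in for a ReassignOwnedStmt obtained from the parser, and the example output mirrors the NOTICE lines in the regression output below):

	/* sketch: deparse a parsed REASSIGN OWNED statement back to SQL */
	ReassignOwnedStmt *stmt = castNode(ReassignOwnedStmt, parsetree);	/* parsetree is hypothetical */
	char *sql = DeparseReassignOwnedStmt((Node *) stmt);

	/*
	 * For the roles used in the regression test below, sql becomes:
	 *
	 *   REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!"
	 *       TO "distributed_target_role1-\!"
	 */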

@@ -1171,18 +1171,17 @@ ResetPropagatedObjects(void)
/*
* HasAnyDependencyInPropagatedObjects decides if any dependency of given object is
* HasAnyObjectInPropagatedObjects decides if any of the objects in the given list are
* propagated in the current transaction.
*/
bool
HasAnyDependencyInPropagatedObjects(const ObjectAddress *objectAddress)
HasAnyObjectInPropagatedObjects(List *objectList)
{
List *dependencyList = GetAllSupportedDependenciesForObject(objectAddress);
ObjectAddress *dependency = NULL;
foreach_ptr(dependency, dependencyList)
ObjectAddress *object = NULL;
foreach_ptr(object, objectList)
{
/* first search in root transaction */
if (DependencyInPropagatedObjectsHash(PropagatedObjectsInTx, dependency))
if (DependencyInPropagatedObjectsHash(PropagatedObjectsInTx, object))
{
return true;
}
@@ -1195,7 +1194,7 @@ HasAnyDependencyInPropagatedObjects(const ObjectAddress *objectAddress)
SubXactContext *state = NULL;
foreach_ptr(state, activeSubXactContexts)
{
if (DependencyInPropagatedObjectsHash(state->propagatedObjects, dependency))
if (DependencyInPropagatedObjectsHash(state->propagatedObjects, object))
{
return true;
}

@@ -445,6 +445,7 @@ extern List * CreateExtensionStmtObjectAddress(Node *stmt, bool missing_ok, bool
/* owned.c - forward declarations */
extern List * PreprocessDropOwnedStmt(Node *node, const char *queryString,
ProcessUtilityContext processUtilityContext);
extern List * PostprocessReassignOwnedStmt(Node *node, const char *queryString);
/* policy.c - forward declarations */
extern List * CreatePolicyCommands(Oid relationId);

@@ -232,6 +232,7 @@ extern char * DeparseCreateRoleStmt(Node *stmt);
extern char * DeparseDropRoleStmt(Node *stmt);
extern char * DeparseGrantRoleStmt(Node *stmt);
extern char * DeparseRoleCommentStmt(Node *node);
extern char * DeparseReassignOwnedStmt(Node *node);
/* forward declarations for deparse_owned_stmts.c */
extern char * DeparseDropOwnedStmt(Node *node);

@@ -386,6 +386,7 @@ extern void EnsureUndistributeTenantTableSafe(Oid relationId, const char *operat
extern TableConversionReturn * UndistributeTable(TableConversionParameters *params);
extern void UndistributeTables(List *relationIdList);
extern void EnsureObjectAndDependenciesExistOnAllNodes(const ObjectAddress *target);
extern void EnsureAllObjectDependenciesExistOnAllNodes(const List *targets);
extern DeferredErrorMessage * DeferErrorIfCircularDependencyExists(const
ObjectAddress *

@@ -163,7 +163,7 @@ extern bool MaybeExecutingUDF(void);
extern void TrackPropagatedObject(const ObjectAddress *objectAddress);
extern void TrackPropagatedTableAndSequences(Oid relationId);
extern void ResetPropagatedObjects(void);
extern bool HasAnyDependencyInPropagatedObjects(const ObjectAddress *objectAddress);
extern bool HasAnyObjectInPropagatedObjects(List *objectList);
/* initialization function(s) */
extern void InitializeTransactionManagement(void);

@@ -285,14 +285,7 @@ SELECT citus_schema_undistribute('tenant1');
ERROR: must be owner of schema tenant1
-- assign all tables to dummyregular except table5
SET role tenantuser;
SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO dummyregular; $$);
result
---------------------------------------------------------------------
REASSIGN OWNED
REASSIGN OWNED
REASSIGN OWNED
(3 rows)
REASSIGN OWNED BY tenantuser TO dummyregular;
CREATE TABLE tenant1.table5(id int);
-- table owner check fails the distribution
SET role dummyregular;
@@ -366,14 +359,7 @@ SELECT result FROM run_command_on_all_nodes($$ SELECT array_agg(logicalrelid ORD
(3 rows)
RESET role;
SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY dummyregular TO tenantuser; $$);
result
---------------------------------------------------------------------
REASSIGN OWNED
REASSIGN OWNED
REASSIGN OWNED
(3 rows)
REASSIGN OWNED BY dummyregular TO tenantuser;
DROP USER dummyregular;
CREATE USER dummysuper superuser;
SET role dummysuper;

@@ -189,14 +189,7 @@ SELECT citus_schema_move('s2', 'dummy_node', 1234);
ERROR: must be owner of schema s2
-- assign all tables to regularuser
RESET ROLE;
SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO regularuser; $$);
result
---------------------------------------------------------------------
REASSIGN OWNED
REASSIGN OWNED
REASSIGN OWNED
(3 rows)
REASSIGN OWNED BY tenantuser TO regularuser;
GRANT USAGE ON SCHEMA citus_schema_move TO regularuser;
SET ROLE regularuser;
SELECT nodeid AS s2_new_nodeid, quote_literal(nodename) AS s2_new_nodename, nodeport AS s2_new_nodeport

@@ -0,0 +1,194 @@
CREATE ROLE distributed_source_role1;
create ROLE "distributed_source_role-\!";
CREATE ROLE "distributed_target_role1-\!";
set citus.enable_create_role_propagation to off;
create ROLE local_target_role1;
NOTICE: not propagating CREATE ROLE/USER commands to other nodes
HINT: Connect to other nodes directly to manually create all necessary users and roles.
\c - - - :worker_1_port
set citus.enable_create_role_propagation to off;
CREATE ROLE local_target_role1;
NOTICE: not propagating CREATE ROLE/USER commands to other nodes
HINT: Connect to other nodes directly to manually create all necessary users and roles.
\c - - - :master_port
set citus.enable_create_role_propagation to off;
create role local_source_role1;
NOTICE: not propagating CREATE ROLE/USER commands to other nodes
HINT: Connect to other nodes directly to manually create all necessary users and roles.
reset citus.enable_create_role_propagation;
GRANT CREATE ON SCHEMA public TO distributed_source_role1,"distributed_source_role-\!";
SET ROLE distributed_source_role1;
CREATE TABLE public.test_table (col1 int);
set role "distributed_source_role-\!";
CREATE TABLE public.test_table2 (col2 int);
RESET ROLE;
select create_distributed_table('test_table', 'col1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
select create_distributed_table('test_table2', 'col2');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table', 'test_table2')
ORDER BY tablename
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
[{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_source_role1"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_source_role-\\!"}]
[{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_source_role1"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_source_role-\\!"}]
[{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_source_role1"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_source_role-\\!"}]
(3 rows)
--tests for REASSIGN OWNED BY with multiple distributed roles and a local role to a distributed role
--local role should be ignored
set citus.log_remote_commands to on;
set citus.grep_remote_commands = '%REASSIGN OWNED BY%';
REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO "distributed_target_role1-\!";
NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!" TO "distributed_target_role1-\!"
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!" TO "distributed_target_role1-\!"
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
reset citus.grep_remote_commands;
reset citus.log_remote_commands;
--check if the owner changed to "distributed_target_role1-\!"
RESET citus.log_remote_commands;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table', 'test_table2')
ORDER BY tablename
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
[{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}]
[{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}]
[{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}]
(3 rows)
--tests for REASSIGN OWNED BY with multiple distributed roles and a local role to a local role
--local role should be ignored
SET ROLE distributed_source_role1;
CREATE TABLE public.test_table3 (col1 int);
set role "distributed_source_role-\!";
CREATE TABLE public.test_table4 (col2 int);
RESET ROLE;
select create_distributed_table('test_table3', 'col1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
select create_distributed_table('test_table4', 'col2');
create_distributed_table
---------------------------------------------------------------------
(1 row)
set citus.log_remote_commands to on;
set citus.grep_remote_commands = '%REASSIGN OWNED BY%';
set citus.enable_create_role_propagation to off;
set citus.enable_alter_role_propagation to off;
set citus.enable_alter_role_set_propagation to off;
REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO local_target_role1;
NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!" TO local_target_role1
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!" TO local_target_role1
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
show citus.enable_create_role_propagation;
citus.enable_create_role_propagation
---------------------------------------------------------------------
off
(1 row)
show citus.enable_alter_role_propagation;
citus.enable_alter_role_propagation
---------------------------------------------------------------------
off
(1 row)
show citus.enable_alter_role_set_propagation;
citus.enable_alter_role_set_propagation
---------------------------------------------------------------------
off
(1 row)
reset citus.grep_remote_commands;
reset citus.log_remote_commands;
reset citus.enable_create_role_propagation;
reset citus.enable_alter_role_propagation;
reset citus.enable_alter_role_set_propagation;
--check if the owner changed to local_target_role1
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table3', 'test_table4')
ORDER BY tablename
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
[{"tablename": "test_table3", "schemaname": "public", "tableowner": "local_target_role1"}, {"tablename": "test_table4", "schemaname": "public", "tableowner": "local_target_role1"}]
[{"tablename": "test_table3", "schemaname": "public", "tableowner": "local_target_role1"}, {"tablename": "test_table4", "schemaname": "public", "tableowner": "local_target_role1"}]
[{"tablename": "test_table3", "schemaname": "public", "tableowner": "local_target_role1"}, {"tablename": "test_table4", "schemaname": "public", "tableowner": "local_target_role1"}]
(3 rows)
--clear resources
DROP OWNED BY distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table', 'test_table2', 'test_table3', 'test_table4')
) q2
$$
) ORDER BY result;
result
---------------------------------------------------------------------
(3 rows)
set client_min_messages to warning;
drop role distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1,local_source_role1;

@@ -61,6 +61,7 @@ test: grant_on_database_propagation
test: alter_database_propagation
test: citus_shards
test: reassign_owned
# ----------

@@ -185,7 +185,7 @@ SELECT citus_schema_undistribute('tenant1');
-- assign all tables to dummyregular except table5
SET role tenantuser;
SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO dummyregular; $$);
REASSIGN OWNED BY tenantuser TO dummyregular;
CREATE TABLE tenant1.table5(id int);
-- table owner check fails the distribution
@@ -219,7 +219,7 @@ SELECT result FROM run_command_on_all_nodes($$ SELECT COUNT(*)=0 FROM pg_dist_co
SELECT result FROM run_command_on_all_nodes($$ SELECT array_agg(logicalrelid ORDER BY logicalrelid) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant1.%' AND colocationid > 0 $$);
RESET role;
SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY dummyregular TO tenantuser; $$);
REASSIGN OWNED BY dummyregular TO tenantuser;
DROP USER dummyregular;
CREATE USER dummysuper superuser;

@@ -147,7 +147,7 @@ SELECT citus_schema_move('s2', 'dummy_node', 1234);
-- assign all tables to regularuser
RESET ROLE;
SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO regularuser; $$);
REASSIGN OWNED BY tenantuser TO regularuser;
GRANT USAGE ON SCHEMA citus_schema_move TO regularuser;

@@ -0,0 +1,141 @@
CREATE ROLE distributed_source_role1;
create ROLE "distributed_source_role-\!";
CREATE ROLE "distributed_target_role1-\!";
set citus.enable_create_role_propagation to off;
create ROLE local_target_role1;
\c - - - :worker_1_port
set citus.enable_create_role_propagation to off;
CREATE ROLE local_target_role1;
\c - - - :master_port
set citus.enable_create_role_propagation to off;
create role local_source_role1;
reset citus.enable_create_role_propagation;
GRANT CREATE ON SCHEMA public TO distributed_source_role1,"distributed_source_role-\!";
SET ROLE distributed_source_role1;
CREATE TABLE public.test_table (col1 int);
set role "distributed_source_role-\!";
CREATE TABLE public.test_table2 (col2 int);
RESET ROLE;
select create_distributed_table('test_table', 'col1');
select create_distributed_table('test_table2', 'col2');
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table', 'test_table2')
ORDER BY tablename
) q2
$$
) ORDER BY result;
--tests for REASSIGN OWNED BY with multiple distributed roles and a local role to a distributed role
--local role should be ignored
set citus.log_remote_commands to on;
set citus.grep_remote_commands = '%REASSIGN OWNED BY%';
REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO "distributed_target_role1-\!";
reset citus.grep_remote_commands;
reset citus.log_remote_commands;
--check if the owner changed to "distributed_target_role1-\!"
RESET citus.log_remote_commands;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table', 'test_table2')
ORDER BY tablename
) q2
$$
) ORDER BY result;
--tests for REASSIGN OWNED BY with multiple distributed roles and a local role to a local role
--local role should be ignored
SET ROLE distributed_source_role1;
CREATE TABLE public.test_table3 (col1 int);
set role "distributed_source_role-\!";
CREATE TABLE public.test_table4 (col2 int);
RESET ROLE;
select create_distributed_table('test_table3', 'col1');
select create_distributed_table('test_table4', 'col2');
set citus.log_remote_commands to on;
set citus.grep_remote_commands = '%REASSIGN OWNED BY%';
set citus.enable_create_role_propagation to off;
set citus.enable_alter_role_propagation to off;
set citus.enable_alter_role_set_propagation to off;
REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO local_target_role1;
show citus.enable_create_role_propagation;
show citus.enable_alter_role_propagation;
show citus.enable_alter_role_set_propagation;
reset citus.grep_remote_commands;
reset citus.log_remote_commands;
reset citus.enable_create_role_propagation;
reset citus.enable_alter_role_propagation;
reset citus.enable_alter_role_set_propagation;
--check if the owner changed to local_target_role1
SET citus.log_remote_commands = false;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table3', 'test_table4')
ORDER BY tablename
) q2
$$
) ORDER BY result;
--clear resources
DROP OWNED BY distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1;
SELECT result from run_command_on_all_nodes(
$$
SELECT jsonb_agg(to_jsonb(q2.*)) FROM (
SELECT
schemaname,
tablename,
tableowner
FROM
pg_tables
WHERE
tablename in ('test_table', 'test_table2', 'test_table3', 'test_table4')
) q2
$$
) ORDER BY result;
set client_min_messages to warning;
drop role distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1,local_source_role1;