From b3ef1b7e390f289264bb34c43607b0ab08640b90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Wed, 21 Feb 2024 13:14:58 +0300
Subject: [PATCH 01/14] Add support for grant on database propagation from
 non-main databases (#7443)

DESCRIPTION: Adds support for distributed `GRANT .. ON DATABASE TO USER`
commands from the databases where Citus is not installed

---------

Co-authored-by: Onur Tirtir

---
 .../distributed/commands/utility_hook.c       |  65 ++-
 ...n_database_propagation_from_non_maindb.out | 471 ++++++++++++++++++
 .../metadata_sync_from_non_maindb.out         |  71 +++
 src/test/regress/expected/multi_test_helpers.out |   3 +-
 src/test/regress/multi_1_schedule             |   2 +-
 ...n_database_propagation_from_non_maindb.sql | 246 +++++++++
 .../sql/metadata_sync_from_non_maindb.sql     |  21 +
 src/test/regress/sql/multi_test_helpers.sql   |   3 +-
 8 files changed, 868 insertions(+), 14 deletions(-)
 create mode 100644 src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out
 create mode 100644 src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql

diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index b021b3fa3..dfb07f179 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -107,8 +107,20 @@ typedef struct NonMainDbDistributedStatementInfo
 {
	int statementType;
	bool explicitlyMarkAsDistributed;
+
+	/*
+	 * checkSupportedObjectTypes is a callback function that checks whether
+	 * the type of the object referred to by the given statement is supported.
+	 *
+	 * Can be NULL if not applicable for the statement type.
+	 */
+	bool (*checkSupportedObjectTypes)(Node *node);
 } NonMainDbDistributedStatementInfo;
 
+/*
+ * MarkObjectDistributedParams is used to pass parameters to the
+ * MarkObjectDistributedFromNonMainDb function.
+ */
 typedef struct MarkObjectDistributedParams
 {
	char *name;
@@ -116,15 +128,6 @@ typedef struct MarkObjectDistributedParams
	uint16 catalogRelId;
 } MarkObjectDistributedParams;
 
-/*
- * NonMainDbSupportedStatements is an array of statements that are supported
- * from non-main databases.
- */
-static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = {
-	{ T_GrantRoleStmt, false },
-	{ T_CreateRoleStmt, true }
-};
-
 bool EnableDDLPropagation = true; /* ddl propagation is enabled */
 int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE;
@@ -153,6 +156,12 @@ static void PostStandardProcessUtility(Node *parsetree);
 static void DecrementUtilityHookCountersIfNecessary(Node *parsetree);
 static bool IsDropSchemaOrDB(Node *parsetree);
 static bool ShouldCheckUndistributeCitusLocalTables(void);
+
+
+/*
+ * Functions to support commands used to manage node-wide objects from non-main
+ * databases.
+ */
 static void RunPreprocessMainDBCommand(Node *parsetree);
 static void RunPostprocessMainDBCommand(Node *parsetree);
 static bool IsStatementSupportedFromNonMainDb(Node *parsetree);
@@ -160,6 +169,25 @@ static bool StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree);
 static void MarkObjectDistributedFromNonMainDb(Node *parsetree);
 static MarkObjectDistributedParams GetMarkObjectDistributedParams(Node *parsetree);
 
+/*
+ * checkSupportedObjectTypes callbacks for
+ * NonMainDbDistributedStatementInfo objects.
+ */
+static bool NonMainDbCheckSupportedObjectTypeForGrant(Node *node);
+
+
+/*
+ * NonMainDbSupportedStatements is an array of statements that are supported
+ * from non-main databases.
+ */
+ObjectType supportedObjectTypesForGrantStmt[] = { OBJECT_DATABASE };
+static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = {
+	{ T_GrantRoleStmt, false, NULL },
+	{ T_CreateRoleStmt, true, NULL },
+	{ T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant }
+};
+
+
 /*
  * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
  * pieces of a utility statement before invoking ProcessUtility.
@@ -1692,10 +1720,13 @@ IsStatementSupportedFromNonMainDb(Node *parsetree)
	for (int i = 0; i < sizeof(NonMainDbSupportedStatements) /
		 sizeof(NonMainDbSupportedStatements[0]); i++)
	{
-		if (type == NonMainDbSupportedStatements[i].statementType)
+		if (type != NonMainDbSupportedStatements[i].statementType)
		{
-			return true;
+			continue;
		}
+
+		return !NonMainDbSupportedStatements[i].checkSupportedObjectTypes ||
+			   NonMainDbSupportedStatements[i].checkSupportedObjectTypes(parsetree);
	}
 
	return false;
@@ -1767,3 +1798,15 @@ GetMarkObjectDistributedParams(Node *parsetree)
 
	elog(ERROR, "unsupported statement type");
 }
+
+
+/*
+ * NonMainDbCheckSupportedObjectTypeForGrant implements the
+ * checkSupportedObjectTypes callback for GrantStmt.
+ */
+static bool
+NonMainDbCheckSupportedObjectTypeForGrant(Node *node)
+{
+	GrantStmt *stmt = castNode(GrantStmt, node);
+	return stmt->objtype == OBJECT_DATABASE;
+}
diff --git a/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out
new file mode 100644
index 000000000..594e3b74e
--- /dev/null
+++ b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out
@@ -0,0 +1,471 @@
+-- Public role has connect,temp,temporary privileges on database
+-- To test these scenarios, we need to revoke these privileges from public role
+-- since public role privileges are inherited by new roles/users
+set citus.enable_create_database_propagation to on;
+create database test_2pc_db;
+show citus.main_db;
+ citus.main_db
+---------------------------------------------------------------------
+ regression
+(1 row)
+
+revoke connect,temp,temporary on database test_2pc_db from public;
+CREATE SCHEMA grant_on_database_propagation_non_maindb;
+SET search_path TO grant_on_database_propagation_non_maindb;
+-- test grant/revoke CREATE privilege propagation on database
+create user "myuser'_test";
+\c test_2pc_db - - :master_port
+grant create on database test_2pc_db to "myuser'_test";
+\c regression - - :master_port;
+select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+(3 rows)
+
+\c test_2pc_db - - :master_port
+revoke create on database test_2pc_db from "myuser'_test";
+\c regression - - :master_port;
+select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+(3 rows)
+
+drop user "myuser'_test";
+---------------------------------------------------------------------
+-- test grant/revoke CONNECT privilege propagation on database
+\c regression - - :master_port
+create user myuser2;
+\c test_2pc_db - - :master_port
+grant CONNECT on database test_2pc_db to myuser2;
+\c regression - - :master_port;
+select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']);
+ check_database_privileges
+--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) +(3 rows) + +drop user myuser2; +--------------------------------------------------------------------- +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,t) + (TEMP,t) + (TEMP,t) +(3 rows) + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,f) + (TEMP,f) + (TEMP,f) +(3 rows) + +drop user myuser3; +--------------------------------------------------------------------- +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(3 rows) + +drop user myuser4; +--------------------------------------------------------------------- +-- test ALL privileges with ALL statement on database +create user myuser5; +grant ALL on database test_2pc_db to myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +drop user myuser5; +--------------------------------------------------------------------- 
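+-- Illustrative note (not executed by this test; standard PostgreSQL
+-- semantics): for databases, ALL is shorthand for CREATE, CONNECT, and
+-- TEMPORARY (TEMP is an alias for TEMPORARY), so the ALL-based test above
+-- is equivalent to granting and revoking the privileges explicitly:
+--   grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser5;
+--   revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser5;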
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6; +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +drop user myuser6; +--------------------------------------------------------------------- +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option +create user myuser7; +create user myuser_1; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7; +set role myuser7; +--here since myuser7 does not have grant option, it should fail +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1; +WARNING: no privileges were granted for "test_2pc_db" +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option; +set role myuser7; +--here since myuser have grant option, it should succeed +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7; +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ; +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. 
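+-- Illustrative note (not executed by this test): the REVOKE ... RESTRICT
+-- errors above occur because myuser_1's privileges were granted by myuser7
+-- and therefore depend on myuser7's grant option. RESTRICT (the default)
+-- refuses to revoke while dependent grants exist; CASCADE, as used below,
+-- also revokes the dependent grants, e.g.:
+--   revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade;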
+--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ; +--here we test if myuser7 still have the privileges after revoke grant option for +\c regression - - :master_port +select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +reset role; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1; +\c regression - - :master_port +drop user myuser_1; +drop user myuser7; +--------------------------------------------------------------------- +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database +-- and multi user +\c regression - - :master_port +create user myuser8; +create user myuser_2; +set citus.enable_create_database_propagation to on; +create database test_db; +revoke connect,temp,temporary on database test_db from public; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2; +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ; +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; +\c regression - - :master_port +select 
check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +\c test_2pc_db - - :master_port +reset role; +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; +set citus.enable_create_database_propagation to on; +drop database test_db; +--------------------------------------------------------------------- +-- rollbacks public role database privileges to original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------- diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out index 03202b15f..f1fdcd93d 100644 --- a/src/test/regress/expected/metadata_sync_from_non_maindb.out +++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out @@ -2,6 +2,7 @@ CREATE SCHEMA metadata_sync_2pc_schema; SET search_path TO metadata_sync_2pc_schema; set citus.enable_create_database_propagation to on; CREATE DATABASE metadata_sync_2pc_db; +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; \c metadata_sync_2pc_db SHOW citus.main_db; citus.main_db @@ -24,7 +25,41 @@ select 1 from citus_remove_node('localhost', :worker_2_port); \c metadata_sync_2pc_db grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; \c regression +select 
check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) +(8 rows) + +\c regression +set citus.enable_create_database_propagation to on; select 1 from citus_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- @@ -48,10 +83,46 @@ $$); [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}] (3 rows) +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + \c metadata_sync_2pc_db revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; \c regression drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; set citus.enable_create_database_propagation to on; diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out 
index 5fc694d13..0f31f2354 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -634,7 +634,8 @@ DECLARE BEGIN FOREACH permission IN ARRAY permissions LOOP - RETURN QUERY EXECUTE format($inner$SELECT '%s', result FROM run_command_on_all_nodes($$select has_database_privilege('%s','%s', '%s'); $$)$inner$, permission, role_name, db_name, permission); + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); END LOOP; END; $func$ LANGUAGE plpgsql; diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index a05601855..015f74973 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -58,7 +58,7 @@ test: multi_metadata_attributes test: multi_read_from_secondaries -test: grant_on_database_propagation +test: grant_on_database_propagation grant_on_database_propagation_from_non_maindb test: alter_database_propagation test: citus_shards diff --git a/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql new file mode 100644 index 000000000..f83472b36 --- /dev/null +++ b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql @@ -0,0 +1,246 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; +show citus.main_db; +revoke connect,temp,temporary on database test_2pc_db from public; + +CREATE SCHEMA grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; + +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; + +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +drop user "myuser'_test"; +----------------------------------------------------------------------- + +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; + +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; + +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; + +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +drop user myuser2; + +----------------------------------------------------------------------- + +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; + +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; + +\c regression - - :master_port; +select 
check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + +drop user myuser3; + +----------------------------------------------------------------------- + +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; + +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; + +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; + +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +drop user myuser4; +----------------------------------------------------------------------- + +-- test ALL privileges with ALL statement on database +create user myuser5; + +grant ALL on database test_2pc_db to myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +drop user myuser5; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +drop user myuser6; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option +create user myuser7; +create user myuser_1; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7; + +set role myuser7; +--here since myuser7 does not have grant option, it should fail +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option; +set role myuser7; + +--here since myuser have grant option, it should succeed +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; +--below 
test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ; + +--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ; + +--here we test if myuser7 still have the privileges after revoke grant option for + +\c regression - - :master_port +select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1; + +\c regression - - :master_port +drop user myuser_1; +drop user myuser7; + +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database +-- and multi user +\c regression - - :master_port +create user myuser8; +create user myuser_2; + +set citus.enable_create_database_propagation to on; +create database test_db; + +revoke connect,temp,temporary on database test_db from public; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; + +set citus.enable_create_database_propagation to on; +drop database test_db; + +--------------------------------------------------------------------------- +-- rollbacks public role database privileges to original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; + +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------------- diff --git 
a/src/test/regress/sql/metadata_sync_from_non_maindb.sql b/src/test/regress/sql/metadata_sync_from_non_maindb.sql index ea0a22d56..43f525189 100644 --- a/src/test/regress/sql/metadata_sync_from_non_maindb.sql +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -3,6 +3,8 @@ SET search_path TO metadata_sync_2pc_schema; set citus.enable_create_database_propagation to on; CREATE DATABASE metadata_sync_2pc_db; +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; + \c metadata_sync_2pc_db SHOW citus.main_db; @@ -19,7 +21,19 @@ select 1 from citus_remove_node('localhost', :worker_2_port); grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; + \c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +\c regression +set citus.enable_create_database_propagation to on; select 1 from citus_add_node('localhost', :worker_2_port); select result FROM run_command_on_all_nodes($$ @@ -33,12 +47,19 @@ FROM ( ) t $$); +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + \c metadata_sync_2pc_db revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; \c regression diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index 40bbaaf07..7d218361c 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -661,7 +661,8 @@ DECLARE BEGIN FOREACH permission IN ARRAY permissions LOOP - RETURN QUERY EXECUTE format($inner$SELECT '%s', result FROM run_command_on_all_nodes($$select has_database_privilege('%s','%s', '%s'); $$)$inner$, permission, role_name, db_name, permission); + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); END LOOP; END; $func$ LANGUAGE plpgsql; From 852bcc5483e912df7e8e3750e2c3a4c46efdadbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Halil=20Ozan=20Akg=C3=BCl?= Date: Wed, 21 Feb 2024 13:44:01 
+0300
Subject: [PATCH 02/14] Add support for create / drop database propagation from
 non-main databases (#7439)

DESCRIPTION: Adds support for distributed `CREATE/DROP DATABASE` commands
from the databases where Citus is not installed

---------

Co-authored-by: Onur Tirtir

---
 .../distributed/commands/utility_hook.c       |  56 ++++-
 .../transaction/remote_transaction.c          |  12 +-
 .../transaction/transaction_management.c      |   4 +-
 src/include/distributed/remote_transaction.h  |   1 +
 src/test/regress/expected/other_databases.out | 211 +++++++++++++++++-
 src/test/regress/sql/other_databases.sql      |  84 ++++++-
 6 files changed, 355 insertions(+), 13 deletions(-)

diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index dfb07f179..1cae4306c 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -162,6 +162,7 @@ static bool ShouldCheckUndistributeCitusLocalTables(void);
  * Functions to support commands used to manage node-wide objects from non-main
  * databases.
  */
+static bool IsCommandToCreateOrDropMainDB(Node *parsetree);
 static void RunPreprocessMainDBCommand(Node *parsetree);
 static void RunPostprocessMainDBCommand(Node *parsetree);
 static bool IsStatementSupportedFromNonMainDb(Node *parsetree);
@@ -184,7 +185,9 @@ ObjectType supportedObjectTypesForGrantStmt[] = { OBJECT_DATABASE };
 static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = {
	{ T_GrantRoleStmt, false, NULL },
	{ T_CreateRoleStmt, true, NULL },
-	{ T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant }
+	{ T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant },
+	{ T_CreatedbStmt, false, NULL },
+	{ T_DropdbStmt, false, NULL },
 };
 
 
@@ -318,9 +321,24 @@ citus_ProcessUtility(PlannedStmt *pstmt,
 
	if (!CitusHasBeenLoaded())
	{
-		if (!IsMainDB)
+		/*
+		 * We always execute CREATE/DROP DATABASE from the main database. There are no
+		 * transactional visibility issues, since these commands are non-transactional.
+		 * And this way we only have to consider one codepath when creating databases.
+		 * We don't try to send the query to the main database if the CREATE/DROP DATABASE
+		 * command is for the main database itself; this is a very rare case, but it's
+		 * exercised by our test suite.
+		 */
+		if (!IsMainDB &&
+			!IsCommandToCreateOrDropMainDB(parsetree))
		{
			RunPreprocessMainDBCommand(parsetree);
+
+			if (IsA(parsetree, CreatedbStmt) ||
+				IsA(parsetree, DropdbStmt))
+			{
+				return;
+			}
		}
 
		/*
@@ -1666,6 +1684,29 @@ DropSchemaOrDBInProgress(void)
 }
 
 
+/*
+ * IsCommandToCreateOrDropMainDB checks whether this query creates or drops
+ * the main database, so that we can make an exception and not send this
+ * query to the main database.
+ */
+static bool
+IsCommandToCreateOrDropMainDB(Node *parsetree)
+{
+	if (IsA(parsetree, CreatedbStmt))
+	{
+		CreatedbStmt *createdbStmt = castNode(CreatedbStmt, parsetree);
+		return strcmp(createdbStmt->dbname, MainDb) == 0;
+	}
+	else if (IsA(parsetree, DropdbStmt))
+	{
+		DropdbStmt *dropdbStmt = castNode(DropdbStmt, parsetree);
+		return strcmp(dropdbStmt->dbname, MainDb) == 0;
+	}
+
+	return false;
+}
+
+
 /*
  * RunPreprocessMainDBCommand runs the necessary commands for a query in the
  * main database, before the query is run on the local node with
  * PrevProcessUtility.
@@ -1679,6 +1720,17 @@ RunPreprocessMainDBCommand(Node *parsetree)
	}
 
	char *queryString = DeparseTreeNode(parsetree);
+
+	if (IsA(parsetree, CreatedbStmt) ||
+		IsA(parsetree, DropdbStmt))
+	{
+		IsMainDBCommandInXact = false;
+		RunCitusMainDBQuery((char *) queryString);
+		return;
+	}
+
+	IsMainDBCommandInXact = true;
+
	StringInfo mainDBQuery = makeStringInfo();
	appendStringInfo(mainDBQuery,
					 START_MANAGEMENT_TRANSACTION,
diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c
index 71b6a78dd..4c26e2478 100644
--- a/src/backend/distributed/transaction/remote_transaction.c
+++ b/src/backend/distributed/transaction/remote_transaction.c
@@ -107,6 +107,12 @@ bool IsMainDB = true;
  */
 char *SuperuserRole = NULL;
 
+/*
+ * IsMainDBCommandInXact shows whether the query sent to the main database
+ * requires a transaction.
+ */
+bool IsMainDBCommandInXact = true;
+
 
 /*
  * start_management_transaction starts a management transaction
@@ -190,7 +196,11 @@ RunCitusMainDBQuery(char *query)
								   PostPortNumber,
								   SuperuserRole,
								   MainDb);
-		RemoteTransactionBegin(MainDBConnection);
+
+		if (IsMainDBCommandInXact)
+		{
+			RemoteTransactionBegin(MainDBConnection);
+		}
	}
 
	SendRemoteCommand(MainDBConnection, query);
diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c
index 29f5b367e..9c7b45680 100644
--- a/src/backend/distributed/transaction/transaction_management.c
+++ b/src/backend/distributed/transaction/transaction_management.c
@@ -333,7 +333,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
			 * If this is a non-Citus main database we should try to commit the prepared
			 * transactions created by the Citus main database on the worker nodes.
			 */
-			if (!IsMainDB && MainDBConnection != NULL)
+			if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact)
			{
				RunCitusMainDBQuery(COMMIT_MANAGEMENT_COMMAND_2PC);
				CleanCitusMainDBConnection();
@@ -533,7 +533,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
			 * main database query. So if some error happens on the distributed main
			 * database query we wouldn't have committed the current query.
*/ - if (!IsMainDB && MainDBConnection != NULL) + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) { RunCitusMainDBQuery("COMMIT"); } diff --git a/src/include/distributed/remote_transaction.h b/src/include/distributed/remote_transaction.h index 2b61c25bd..45e2eba70 100644 --- a/src/include/distributed/remote_transaction.h +++ b/src/include/distributed/remote_transaction.h @@ -152,5 +152,6 @@ extern bool IsMainDB; extern char *SuperuserRole; extern char *MainDb; extern struct MultiConnection *MainDBConnection; +extern bool IsMainDBCommandInXact; #endif /* REMOTE_TRANSACTION_H */ diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index a15c4bb50..c67746055 100644 --- a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -98,11 +98,11 @@ REVOKE ALL ON SCHEMA citus_internal FROM nonsuperuser; DROP USER other_db_user9, nonsuperuser; -- test from a worker \c - - - :worker_1_port -CREATE DATABASE other_db2; +CREATE DATABASE worker_other_db; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to other nodes HINT: You can manually create a database and its extensions on other nodes. -\c other_db2 +\c worker_other_db CREATE USER worker_user1; BEGIN; CREATE USER worker_user2; @@ -129,8 +129,211 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; -- some user creation commands will fail but let's make sure we try to drop them just in case DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; NOTICE: role "worker_user3" does not exist, skipping -\c - - - :worker_1_port -DROP DATABASE other_db2; +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db1 +CREATE DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | 
{"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c other_db1 +DROP DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db +DROP DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE worker_other_db; +CREATE 
DATABASE other_db5; +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :worker_2_port +-- locally create a database +CREATE DATABASE local_db; +\c regression - - - +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :master_port +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. +CREATE DATABASE local_db; +ERROR: database "local_db" already exists +CONTEXT: while executing command on localhost:xxxxx +while executing command on localhost:xxxxx +\c regression - - - +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "local_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :worker_2_port +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +DROP DATABASE local_db; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + 
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + \c - - - :master_port +DROP DATABASE other_db5; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + DROP SCHEMA other_databases; DROP DATABASE other_db1; diff --git a/src/test/regress/sql/other_databases.sql b/src/test/regress/sql/other_databases.sql index 8cd54f354..aa936e507 100644 --- a/src/test/regress/sql/other_databases.sql +++ b/src/test/regress/sql/other_databases.sql @@ -75,9 +75,9 @@ DROP USER other_db_user9, nonsuperuser; -- test from a worker \c - - - :worker_1_port -CREATE DATABASE other_db2; +CREATE DATABASE worker_other_db; -\c other_db2 +\c worker_other_db CREATE USER worker_user1; @@ -98,9 +98,85 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; -- some user creation commands will fail but let's make sure we try to drop them just in case DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; -\c - - - :worker_1_port -DROP DATABASE other_db2; +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); +\c other_db1 +CREATE DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c other_db1 +DROP DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +\c worker_other_db +DROP DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +DROP DATABASE worker_other_db; + +CREATE DATABASE other_db5; + +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :worker_2_port + +-- locally create a database +CREATE DATABASE local_db; + +\c regression - - - + +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :master_port + +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. 
+CREATE DATABASE local_db;
+
+\c regression - - -
+
+SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result;
+
+\c - - - :worker_2_port
+
+-- locally drop the database for cleanup purposes
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
+DROP DATABASE local_db;
+
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
 \c - - - :master_port
+DROP DATABASE other_db5;
+
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
 DROP SCHEMA other_databases;
 DROP DATABASE other_db1;

From 683e10ab69d42c752212c4c5080431abb4c35306 Mon Sep 17 00:00:00 2001
From: Karina <55838532+Green-Chan@users.noreply.github.com>
Date: Wed, 21 Feb 2024 14:35:27 +0300
Subject: [PATCH 03/14] Fix error in master_disable_node/citus_disable_node
 (#7492)

This fixes #7454: master_disable_node() has only two arguments, but calls
citus_disable_node(), which tries to read a third argument.

Co-authored-by: Karina Litskevich
---
 src/backend/distributed/metadata/node_metadata.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c
index 3fa2549e7..d93b133ea 100644
--- a/src/backend/distributed/metadata/node_metadata.c
+++ b/src/backend/distributed/metadata/node_metadata.c
@@ -507,7 +507,13 @@ citus_disable_node(PG_FUNCTION_ARGS)
 {
	text *nodeNameText = PG_GETARG_TEXT_P(0);
	int32 nodePort = PG_GETARG_INT32(1);
-	bool synchronousDisableNode = PG_GETARG_BOOL(2);
+
+	bool synchronousDisableNode = true;
+	Assert(PG_NARGS() == 2 || PG_NARGS() == 3);
+	if (PG_NARGS() == 3)
+	{
+		synchronousDisableNode = PG_GETARG_BOOL(2);
+	}

	char *nodeName = text_to_cstring(nodeNameText);
	WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort);

From 211415dd4bea65451ae51bf917b40bce842baf95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Wed, 21 Feb 2024 18:37:25 +0300
Subject: [PATCH 04/14] Removes granted by statement to fix flaky test errors
 (#7526)

Fix for #7519.

In the metadata sync phase, grant statements for roles are fetched from
the catalog tables and propagated. However, in some cases the
GRANT .. WITH ADMIN OPTION statements execute after the GRANT .. GRANTED BY
statements, which causes the error reported in #7519.
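For illustration, the problematic replay ordering looks roughly like the
following (a hypothetical sketch with made-up role names, not the exact
statements that metadata sync emits):

```sql
-- Grants replayed from the catalog tables can arrive in this order:
GRANT some_role TO member_user GRANTED BY grantor_user;  -- fails: grantor_user has no ADMIN OPTION yet
GRANT some_role TO grantor_user WITH ADMIN OPTION;       -- the grant that would have made the GRANTED BY valid
```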
We will fix this issue with the grantor propagation task in the project --- .../expected/metadata_sync_from_non_maindb.out | 17 +++++++++++------ .../sql/metadata_sync_from_non_maindb.sql | 9 +++++++-- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out index f1fdcd93d..695b7a4b3 100644 --- a/src/test/regress/expected/metadata_sync_from_non_maindb.out +++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out @@ -24,7 +24,12 @@ select 1 from citus_remove_node('localhost', :worker_2_port); \c metadata_sync_2pc_db grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; -grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause. +-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; --test for grant on database \c metadata_sync_2pc_db - - :master_port grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; @@ -76,11 +81,11 @@ FROM ( order by member::regrole::text ) t $$); - result + result --------------------------------------------------------------------- - [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}] - [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}] - 
[{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] (3 rows) select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); @@ -117,7 +122,7 @@ select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db', (12 rows) \c metadata_sync_2pc_db -revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; diff --git a/src/test/regress/sql/metadata_sync_from_non_maindb.sql 
b/src/test/regress/sql/metadata_sync_from_non_maindb.sql index 43f525189..a90d6915a 100644 --- a/src/test/regress/sql/metadata_sync_from_non_maindb.sql +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -19,7 +19,12 @@ select 1 from citus_remove_node('localhost', :worker_2_port); \c metadata_sync_2pc_db grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; -grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause. +-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; --test for grant on database \c metadata_sync_2pc_db - - :master_port @@ -52,7 +57,7 @@ select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db', select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); \c metadata_sync_2pc_db -revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; From 3509b7df5a96996af86b88310126799a9bae4e42 Mon Sep 17 00:00:00 2001 From: eaydingol <60466783+eaydingol@users.noreply.github.com> Date: Fri, 23 Feb 2024 09:54:19 +0300 Subject: [PATCH 05/14] Add support for SECURITY LABEL on ROLE propagation from non-main databases (#7525) DESCRIPTION: Adds support for distributed "SECURITY LABEL on ROLE" commands from the databases where Citus is not installed. --- .../distributed/commands/utility_hook.c | 14 +++ .../metadata_sync_from_non_maindb.out | 34 ++++++ .../regress/expected/seclabel_non_maindb.out | 111 ++++++++++++++++++ src/test/regress/multi_schedule | 2 +- .../sql/metadata_sync_from_non_maindb.sql | 11 ++ src/test/regress/sql/seclabel_non_maindb.sql | 71 +++++++++++ 6 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 src/test/regress/expected/seclabel_non_maindb.out create mode 100644 src/test/regress/sql/seclabel_non_maindb.sql diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 1cae4306c..a1a233310 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -175,6 +175,7 @@ static MarkObjectDistributedParams GetMarkObjectDistributedParams(Node *parsetre * NonMainDbDistributedStatementInfo objects. 
*/ static bool NonMainDbCheckSupportedObjectTypeForGrant(Node *node); +static bool NonMainDbCheckSupportedObjectTypeForSecLabel(Node *node); /* @@ -188,6 +189,7 @@ static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = { T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant }, { T_CreatedbStmt, false, NULL }, { T_DropdbStmt, false, NULL }, + { T_SecLabelStmt, false, NonMainDbCheckSupportedObjectTypeForSecLabel }, }; @@ -1862,3 +1864,15 @@ NonMainDbCheckSupportedObjectTypeForGrant(Node *node) GrantStmt *stmt = castNode(GrantStmt, node); return stmt->objtype == OBJECT_DATABASE; } + + +/* + * NonMainDbCheckSupportedObjectTypeForSecLabel implements checkSupportedObjectTypes + * callback for SecLabel. + */ +static bool +NonMainDbCheckSupportedObjectTypeForSecLabel(Node *node) +{ + SecLabelStmt *stmt = castNode(SecLabelStmt, node); + return stmt->objtype == OBJECT_ROLE; +} diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out index 695b7a4b3..91ca1c82d 100644 --- a/src/test/regress/expected/metadata_sync_from_non_maindb.out +++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out @@ -63,7 +63,25 @@ select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db', (TEMPORARY,t) (8 rows) +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; \c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + set citus.enable_create_database_propagation to on; select 1 from citus_add_node('localhost', :worker_2_port); ?column? 
@@ -121,6 +139,22 @@ select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db', (TEMPORARY,t) (12 rows) +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + \c metadata_sync_2pc_db revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; diff --git a/src/test/regress/expected/seclabel_non_maindb.out b/src/test/regress/expected/seclabel_non_maindb.out new file mode 100644 index 000000000..48c89fb31 --- /dev/null +++ b/src/test/regress/expected/seclabel_non_maindb.out @@ -0,0 +1,111 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database1; +CREATE DATABASE database2; +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +SHOW citus.superuser; + citus.superuser +--------------------------------------------------------------------- + postgres +(1 row) + +CREATE ROLE "user 2"; +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus 
'!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c database1 +-- Set a SECURITY LABEL on database, it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; +-- Set a SECURITY LABEL on a table, it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_1 | + worker_2 | +(3 rows) + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname; + provider | objtype | label | objname +--------------------------------------------------------------------- + citus '!tests_label_provider | role | citus_unclassified | "user 2" + citus '!tests_label_provider | role | citus_classified | user1 +(2 rows) + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | + worker_1 | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_2 | +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 4fe98b4e3..85de7b8b8 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -108,7 +108,7 @@ test: object_propagation_debug test: 
undistribute_table test: run_command_on_all_nodes test: background_task_queue_monitor -test: other_databases grant_role_from_non_maindb +test: other_databases grant_role_from_non_maindb seclabel_non_maindb test: citus_internal_access # Causal clock test diff --git a/src/test/regress/sql/metadata_sync_from_non_maindb.sql b/src/test/regress/sql/metadata_sync_from_non_maindb.sql index a90d6915a..93445be27 100644 --- a/src/test/regress/sql/metadata_sync_from_non_maindb.sql +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -37,7 +37,15 @@ select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db', select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; + \c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + set citus.enable_create_database_propagation to on; select 1 from citus_add_node('localhost', :worker_2_port); @@ -56,6 +64,9 @@ select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db', select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + \c metadata_sync_2pc_db revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; diff --git a/src/test/regress/sql/seclabel_non_maindb.sql b/src/test/regress/sql/seclabel_non_maindb.sql new file mode 100644 index 000000000..1833d4193 --- /dev/null +++ b/src/test/regress/sql/seclabel_non_maindb.sql @@ -0,0 +1,71 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases + +SET citus.enable_create_database_propagation to ON; + +CREATE DATABASE database1; +CREATE DATABASE database2; + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; + + +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW citus.main_db; +SHOW citus.superuser; + +CREATE ROLE "user 2"; + +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; + +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +\c database1 +-- Set a SECURITY LABEL on database, it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; + +-- Set a 
SECURITY LABEL on a table, it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname; + + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; From 9ddee5d02a7ea4538fac95e0f34ec687481e8b7e Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Fri, 23 Feb 2024 13:37:11 +0300 Subject: [PATCH 06/14] Test that we check unsupported options for CREATE DATABASE from non-main dbs (#7532) When adding CREATE/DROP DATABASE propagation in #7240, luckily we've added EnsureSupportedCreateDatabaseCommand() check into deparser too just to be on the safe side. That way, today CREATE DATABASE commands from non-main dbs don't silently allow unsupported options. I wasn't aware of this when merging #7439 and hence wanted to add a test so that we don't mistakenly remove that check from deparser in future. --- src/backend/distributed/deparser/deparse_database_stmts.c | 5 +++++ .../expected/create_drop_database_propagation_pg15.out | 6 ++++++ .../regress/sql/create_drop_database_propagation_pg15.sql | 8 ++++++++ 3 files changed, 19 insertions(+) diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index 30ac3f32c..66df5361e 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -277,6 +277,11 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) /* * Make sure that we don't try to deparse something that this * function doesn't expect. + * + * This is also useful to throw an error for unsupported CREATE + * DATABASE options when the command is issued from non-main dbs + * because we use the same function to deparse CREATE DATABASE + * commands there too. 
*/ EnsureSupportedCreateDatabaseCommand(stmt); diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out index 9a501558a..7e76d87f3 100644 --- a/src/test/regress/expected/create_drop_database_propagation_pg15.out +++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out @@ -78,5 +78,11 @@ SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER B worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} (3 rows) +\c test_locale_provider - - :worker_2_port +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; +ERROR: CREATE DATABASE option "oid" is not supported +\c regression - - :master_port +set citus.enable_create_database_propagation to on; drop database test_locale_provider; \c - - - :master_port diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql index 40d1b9e09..4e006c54f 100644 --- a/src/test/regress/sql/create_drop_database_propagation_pg15.sql +++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql @@ -60,6 +60,14 @@ CREATE DATABASE test_locale_provider SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type; +\c test_locale_provider - - :worker_2_port + +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; + +\c regression - - :master_port + +set citus.enable_create_database_propagation to on; drop database test_locale_provider; \c - - - :master_port From cbb90cc4ae24b94def330bfa5a4b1876af9299d2 Mon Sep 17 00:00:00 2001 From: Nils Dijk Date: Fri, 23 Feb 2024 14:38:11 +0100 Subject: [PATCH 07/14] Devcontainer: enable coredumps (#7523) Add configuration for coredumps and document how to make sure they are enabled when developing in a devcontainer. 
---------

Co-authored-by: Jelte Fennema-Nio
---
 .devcontainer/.vscode/launch.json | 22 +++++++++++++++-
 .devcontainer/Dockerfile          |  1 +
 .devcontainer/devcontainer.json   |  7 ++++-
 DEVCONTAINER.md                   | 43 +++++++++++++++++++++++++++++++
 4 files changed, 71 insertions(+), 2 deletions(-)
 create mode 100644 DEVCONTAINER.md

diff --git a/.devcontainer/.vscode/launch.json b/.devcontainer/.vscode/launch.json
index 290f6573a..6de90ce09 100644
--- a/.devcontainer/.vscode/launch.json
+++ b/.devcontainer/.vscode/launch.json
@@ -16,5 +16,25 @@
        }
      ],
    },
-  ]
+    {
+      "name": "Open core file",
+      "type": "cppdbg",
+      "request": "launch",
+      "program": "/home/citus/.pgenv/pgsql/bin/postgres",
+      "coreDumpPath": "${input:corefile}",
+      "cwd": "${workspaceFolder}",
+      "MIMode": "gdb",
+    }
+  ],
+  "inputs": [
+    {
+      "id": "corefile",
+      "type": "command",
+      "command": "extension.commandvariable.file.pickFile",
+      "args": {
+        "dialogTitle": "Select core file",
+        "include": "**/core*",
+      },
+    },
+  ],
 }
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 38055f367..13762e1e5 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -152,6 +152,7 @@ RUN sudo apt update \
     lsof \
     man \
     net-tools \
+    psmisc \
     pspg \
     tree \
     vim \
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 58c9e07a8..cddfcebf4 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -2,8 +2,11 @@
   "image": "ghcr.io/citusdata/citus-devcontainer:main",
   "runArgs": [
     "--cap-add=SYS_PTRACE",
+    "--ulimit=core=-1",
+  ],
+  "forwardPorts": [
+    9700
   ],
-  "forwardPorts": [9700],
   "customizations": {
     "vscode": {
       "extensions": [
@@ -14,6 +17,7 @@
         "github.vscode-pull-request-github",
         "ms-vscode.cpptools-extension-pack",
         "ms-vsliveshare.vsliveshare",
+        "rioj7.command-variable",
       ],
       "settings": {
         "files.exclude": {
@@ -30,3 +34,4 @@
   "updateContentCommand": "./configure",
   "postCreateCommand": "make -C .devcontainer/",
 }
+
diff --git a/DEVCONTAINER.md b/DEVCONTAINER.md
new file mode 100644
index 000000000..d004e6f71
--- /dev/null
+++ b/DEVCONTAINER.md
@@ -0,0 +1,43 @@
+# Devcontainer
+
+## Coredumps
+When postgres/citus crashes, there is the option to create a coredump, which is useful for debugging the issue. Coredumps are enabled in the devcontainer by default; however, not all environments are configured correctly out of the box. The most important configuration that is not standardized is the `core_pattern`. The configuration can be verified from the container, but you cannot change this setting from inside the container, as the filesystem containing this setting is mounted read-only while inside the container.
+
+To verify whether corefiles can be written, run the following command in a terminal. It shows the filename pattern with which the corefile will be written.
+```bash
+cat /proc/sys/kernel/core_pattern
+```
+
+This should be configured with a relative path or a simple filename, such as `core`. If your environment shows an absolute path, you will need to change this setting. How to change it depends largely on the underlying system, as the setting lives in the kernel of the host running the container.
+
+You can put any pattern in `/proc/sys/kernel/core_pattern` as you see fit. For example, you can add the PID to the core pattern in one of two ways:
+- You either include `%p` in the core_pattern. This gets substituted with the PID of the crashing process.
+- Alternatively, you could set `/proc/sys/kernel/core_uses_pid` to `1` in the same way as you set `core_pattern`. This will append the PID to the corefile if `%p` is not explicitly contained in the core_pattern.
+
+When a coredump is written, you can use the debug/launch configuration `Open core file`, which is preconfigured in the devcontainer. This will open a file prompt that lists all coredumps found in your workspace. When you want to debug coredumps from `citus_dev` runs in your `/data` directory, you can add the data directory to your workspace: in the VS Code command palette, run `>Workspace: Add Folder to Workspace...` and select the `/data` directory. This will allow you to open the coredumps from the `/data` directory in the `Open core file` debug configuration.
+
+### Windows (docker desktop)
+When running Docker Desktop on Windows, you will most likely need to change this setting. The Linux guest in WSL2 that runs your container is the `docker-desktop` environment. The easiest way to get onto the host, where you can change this setting, is to open a PowerShell window and verify that you have the docker-desktop environment listed.
+
+```powershell
+wsl --list
+```
+
+Among others, this should list both `docker-desktop` and `docker-desktop-data`. You can then open a shell in the `docker-desktop` environment.
+
+```powershell
+wsl -d docker-desktop
+```
+
+Inside this shell, you can verify that you have the right environment by running
+
+```bash
+cat /proc/sys/kernel/core_pattern
+```
+
+This should show the same configuration as the one you see inside the devcontainer. You can then change the setting by running the following command.
+This will change the setting for the current session; to make the change permanent, you will need to add it to a startup script.
+
+```bash
+echo "core" > /proc/sys/kernel/core_pattern
+```

From f4242685e377d5eddf8c81e1c127b1c4325c83cb Mon Sep 17 00:00:00 2001
From: Onur Tirtir
Date: Fri, 23 Feb 2024 20:02:32 +0300
Subject: [PATCH 08/14] Add failure handling for CREATE DATABASE commands
 (#7483)

In the preprocess phase, we save the original database name, replace the
dbname field of CreatedbStmt with a temporary name (to let Postgres create
the database with the temporary name locally), and then insert a cleanup
record for the temporary database name on all nodes **(\*\*)**. In the
postprocess phase, we first rename the temporary database back to its
original name on the local node and then return a list of distributed DDL
jobs i) to create the database with the temporary name and then ii) to
rename it back to its original name on other nodes. That way, if CREATE
DATABASE fails on any of the nodes, the temporary database will be cleaned
up by the cleanup records that we inserted in the preprocess phase, and in
case of a failure, we won't leak any database with the name that the user
intended to use.

Solves the problem documented in
https://github.com/citusdata/citus/issues/7369 for CREATE DATABASE
commands.

**(\*\*):** To ensure that we insert cleanup records on all nodes, with
this PR we also start requiring that the coordinator is in the metadata,
because otherwise we would skip inserting a cleanup record for the
coordinator.
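Roughly, the observable statement sequence for a distributed `CREATE
DATABASE mydb` becomes the following (a sketch; `mydb` and the operation id
`1234` are made up, but the temporary name format
`citus_temp_database_<operationId>_<groupId>` matches the implementation
below):

```sql
-- executed on every node, outside of a transaction block:
CREATE DATABASE citus_temp_database_1234_0;
-- then, as a coordinated distributed DDL job:
ALTER DATABASE citus_temp_database_1234_0 RENAME TO mydb;
-- if any node fails before the rename, the pre-registered cleanup
-- records drop the leftover temporary database.
```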
---
 src/backend/distributed/commands/database.c   | 129 +++++-
 src/backend/distributed/commands/index.c      |   3 +
 .../distributed/commands/utility_hook.c       |  13 +-
 .../distributed/operations/shard_cleaner.c    |  78 +++-
 .../distributed/commands/utility_hook.h       |  12 +-
 src/include/distributed/shard_cleaner.h       |   3 +-
 .../expected/alter_database_propagation.out   |  10 +
 .../create_drop_database_propagation.out      | 100 ++++-
 .../expected/failure_create_database.out      | 386 ++++++++++++++++++
 .../isolation_database_cmd_from_any_node.out  | 160 ++++++++
 src/test/regress/failure_schedule             |   1 +
 .../isolation_database_cmd_from_any_node.spec |   4 +
 .../sql/alter_database_propagation.sql        |   2 +
 .../sql/create_drop_database_propagation.sql  |  60 ++-
 .../regress/sql/failure_create_database.sql   | 128 ++++++
 15 files changed, 1063 insertions(+), 26 deletions(-)
 create mode 100644 src/test/regress/expected/failure_create_database.out
 create mode 100644 src/test/regress/sql/failure_create_database.sql

diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c
index 33223f416..5479a59ed 100644
--- a/src/backend/distributed/commands/database.c
+++ b/src/backend/distributed/commands/database.c
@@ -40,15 +40,38 @@
 #include "distributed/deparse_shard_query.h"
 #include "distributed/deparser.h"
 #include "distributed/listutils.h"
+#include "distributed/local_executor.h"
 #include "distributed/metadata/distobject.h"
 #include "distributed/metadata_sync.h"
 #include "distributed/metadata_utility.h"
 #include "distributed/multi_executor.h"
 #include "distributed/relation_access_tracking.h"
 #include "distributed/serialize_distributed_ddls.h"
+#include "distributed/shard_cleaner.h"
 #include "distributed/worker_protocol.h"
 #include "distributed/worker_transaction.h"

+
+/*
+ * Used to save the original name of the database before it is replaced with a
+ * temporary name for failure handling purposes in PreprocessCreateDatabaseStmt().
+ */
+static char *CreateDatabaseCommandOriginalDbName = NULL;
+
+
+/*
+ * The format string used when creating temporary databases for failure
+ * handling purposes.
+ *
+ * The fields are as follows to ensure using a unique name for each temporary
+ * database:
+ *  - operationId: The operation id returned by RegisterOperationNeedingCleanup().
+ *  - groupId: The group id of the worker node where the CREATE DATABASE command
+ *    is issued from.
+ */
+#define TEMP_DATABASE_NAME_FMT "citus_temp_database_%lu_%d"
+
+
 /*
  * DatabaseCollationInfo is used to store collation related information of a database.
  */
@@ -286,8 +309,9 @@ PreprocessAlterDatabaseStmt(Node *node, const char *queryString,
	 * NontransactionalNodeDDLTask to run the command on the workers outside
	 * the transaction block.
	 */
-
-	return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands);
+	bool warnForPartialFailure = true;
+	return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands,
+										   warnForPartialFailure);
 }
 else
 {
@@ -453,7 +477,12 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString,
  *
  * In this stage, we perform validations that we want to ensure before delegating to
  * previous utility hooks because it might not be convenient to throw an error in an
- * implicit transaction that creates a database.
+ * implicit transaction that creates a database. Also in this stage, we save the original
+ * database name and replace the dbname field with a temporary name for failure handling
+ * purposes. We let Postgres create the database with the temporary name, insert a cleanup
+ * record for the temporary database name on all nodes and let PostprocessCreateDatabaseStmt()
+ * return the distributed DDL job that both creates the database with the temporary name
+ * and then renames it back to its original name.
 *
 * We also serialize database commands globally by acquiring a Citus specific advisory
 * lock based on OCLASS_DATABASE on the first primary worker node.
@@ -467,22 +496,56 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
		return NIL;
	}

-	EnsurePropagationToCoordinator();
+	EnsureCoordinatorIsInMetadata();

	CreatedbStmt *stmt = castNode(CreatedbStmt, node);
	EnsureSupportedCreateDatabaseCommand(stmt);

	SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE);

+	OperationId operationId = RegisterOperationNeedingCleanup();
+
+	char *tempDatabaseName = psprintf(TEMP_DATABASE_NAME_FMT,
+									  operationId, GetLocalGroupId());
+
+	List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock);
+	WorkerNode *remoteNode = NULL;
+	foreach_ptr(remoteNode, remoteNodes)
+	{
+		InsertCleanupRecordOutsideTransaction(
+			CLEANUP_OBJECT_DATABASE,
+			pstrdup(quote_identifier(tempDatabaseName)),
+			remoteNode->groupId,
+			CLEANUP_ON_FAILURE
+			);
+	}
+
+	CreateDatabaseCommandOriginalDbName = stmt->dbname;
+	stmt->dbname = tempDatabaseName;
+
+	/*
+	 * Delete cleanup records in the same transaction so that if the current
+	 * transaction fails for some reason, then the cleanup records won't be
+	 * deleted. In the happy path, we will delete the cleanup records without
+	 * deferring them to the background worker.
+	 */
+	FinalizeOperationNeedingCleanupOnSuccess("create database");
+
	return NIL;
 }


 /*
  * PostprocessCreateDatabaseStmt is executed after the statement is applied to the local
- * postgres instance. In this stage we prepare the commands that need to be run on
- * all workers to create the database.
+ * postgres instance.
+ *
+ * In this stage, we first rename the temporary database back to its original name for
+ * the local node and then return a list of distributed DDL jobs to create the database
+ * with the temporary name and then to rename it back to its original name. That way,
+ * if CREATE DATABASE fails on any of the nodes, the temporary database will be cleaned
+ * up by the cleanup records that we inserted in PreprocessCreateDatabaseStmt() and, in
+ * case of a failure, we won't leak any database with the name that the user intended
+ * to use.
  */
 List *
 PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
@@ -515,9 +578,55 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)
	 * block, we need to use NontransactionalNodeDDLTaskList() to send the CREATE
	 * DATABASE statement to the workers.
*/ + bool warnForPartialFailure = false; List *createDatabaseDDLJobList = - NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands); - return createDatabaseDDLJobList; + NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands, + warnForPartialFailure); + + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + + char *renameDatabaseCommand = + psprintf("ALTER DATABASE %s RENAME TO %s", + quote_identifier(stmt->dbname), + quote_identifier(CreateDatabaseCommandOriginalDbName)); + + List *renameDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + renameDatabaseCommand, + ENABLE_DDL_PROPAGATION); + + /* + * We use NodeDDLTaskList() to send the RENAME DATABASE statement to the + * workers because we want to execute it in a coordinated transaction. + */ + List *renameDatabaseDDLJobList = + NodeDDLTaskList(REMOTE_NODES, renameDatabaseCommands); + + /* + * Temporarily disable citus.enable_ddl_propagation before issuing + * rename command locally because we don't want to execute it on remote + * nodes yet. We will execute it on remote nodes by returning it as a + * distributed DDL job. + * + * The reason why we don't want to execute it on remote nodes yet is that + * the database is not created on remote nodes yet. + */ + int saveNestLevel = NewGUCNestLevel(); + set_config_option("citus.enable_ddl_propagation", "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + ExecuteUtilityCommand(renameDatabaseCommand); + + AtEOXact_GUC(true, saveNestLevel); + + /* + * Restore the original database name because MarkObjectDistributed() + * resolves oid of the object based on the database name and is called + * after executing the distributed DDL job that renames temporary database. + */ + stmt->dbname = CreateDatabaseCommandOriginalDbName; + + return list_concat(createDatabaseDDLJobList, renameDatabaseDDLJobList); } @@ -571,8 +680,10 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, * use NontransactionalNodeDDLTaskList() to send the DROP DATABASE statement * to the workers. 
*/ + bool warnForPartialFailure = true; List *dropDatabaseDDLJobList = - NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands); + NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands, + warnForPartialFailure); return dropDatabaseDDLJobList; } diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index c41136176..e97312df2 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -493,6 +493,7 @@ GenerateCreateIndexDDLJob(IndexStmt *createIndexStatement, const char *createInd ddlJob->startNewTransaction = createIndexStatement->concurrent; ddlJob->metadataSyncCommand = createIndexCommand; ddlJob->taskList = CreateIndexTaskList(createIndexStatement); + ddlJob->warnForPartialFailure = true; return ddlJob; } @@ -652,6 +653,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand, "concurrently"); ddlJob->metadataSyncCommand = reindexCommand; ddlJob->taskList = CreateReindexTaskList(relationId, reindexStatement); + ddlJob->warnForPartialFailure = true; ddlJobs = list_make1(ddlJob); } @@ -780,6 +782,7 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand, ddlJob->metadataSyncCommand = dropIndexCommand; ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId, dropIndexStatement); + ddlJob->warnForPartialFailure = true; ddlJobs = list_make1(ddlJob); } diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index a1a233310..7dff9cbf6 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -1377,7 +1377,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) errhint("Use DROP INDEX CONCURRENTLY IF EXISTS to remove the " "invalid index, then retry the original command."))); } - else + else if (ddlJob->warnForPartialFailure) { ereport(WARNING, (errmsg( @@ -1386,9 +1386,9 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) "state.\nIf the problematic command is a CREATE operation, " "consider using the 'IF EXISTS' syntax to drop the object," "\nif applicable, and then re-attempt the original command."))); - - PG_RE_THROW(); } + + PG_RE_THROW(); } PG_END_TRY(); } @@ -1604,9 +1604,12 @@ DDLTaskList(Oid relationId, const char *commandString) * NontransactionalNodeDDLTaskList builds a list of tasks to execute a DDL command on a * given target set of nodes with cannotBeExecutedInTransaction is set to make sure * that task list is executed outside a transaction block. + * + * Also sets warnForPartialFailure for the returned DDLJobs. 
*/ List * -NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands) +NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, + bool warnForPartialFailure) { List *ddlJobs = NodeDDLTaskList(targets, commands); DDLJob *ddlJob = NULL; @@ -1617,6 +1620,8 @@ NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands) { task->cannotBeExecutedInTransaction = true; } + + ddlJob->warnForPartialFailure = warnForPartialFailure; } return ddlJobs; } diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c index db1cad6bc..2efce9a7b 100644 --- a/src/backend/distributed/operations/shard_cleaner.c +++ b/src/backend/distributed/operations/shard_cleaner.c @@ -92,6 +92,8 @@ static bool TryDropReplicationSlotOutsideTransaction(char *replicationSlotName, char *nodeName, int nodePort); static bool TryDropUserOutsideTransaction(char *username, char *nodeName, int nodePort); +static bool TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, + int nodePort); static CleanupRecord * GetCleanupRecordByNameAndType(char *objectName, CleanupObject type); @@ -141,7 +143,6 @@ Datum citus_cleanup_orphaned_resources(PG_FUNCTION_ARGS) { CheckCitusVersion(ERROR); - EnsureCoordinator(); PreventInTransactionBlock(true, "citus_cleanup_orphaned_resources"); int droppedCount = DropOrphanedResourcesForCleanup(); @@ -245,12 +246,6 @@ TryDropOrphanedResources() static int DropOrphanedResourcesForCleanup() { - /* Only runs on Coordinator */ - if (!IsCoordinator()) - { - return 0; - } - List *cleanupRecordList = ListCleanupRecords(); /* @@ -608,6 +603,12 @@ TryDropResourceByCleanupRecordOutsideTransaction(CleanupRecord *record, return TryDropUserOutsideTransaction(record->objectName, nodeName, nodePort); } + case CLEANUP_OBJECT_DATABASE: + { + return TryDropDatabaseOutsideTransaction(record->objectName, nodeName, + nodePort); + } + default: { ereport(WARNING, (errmsg( @@ -888,6 +889,69 @@ TryDropUserOutsideTransaction(char *username, } +/* + * TryDropDatabaseOutsideTransaction drops the database with the given name + * if it exists. + */ +static bool +TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, int nodePort) +{ + int connectionFlags = (OUTSIDE_TRANSACTION | FORCE_NEW_CONNECTION); + MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + CitusExtensionOwnerName(), + NULL); + + if (PQstatus(connection->pgConn) != CONNECTION_OK) + { + return false; + } + + /* + * We want to disable DDL propagation and set lock_timeout before issuing + * the DROP DATABASE command but we cannot do so in a way that's scoped + * to the DROP DATABASE command. This is because, we cannot use a + * transaction block for the DROP DATABASE command. + * + * For this reason, to avoid leaking the lock_timeout and DDL propagation + * settings to future commands, we force the connection to close at the end + * of the transaction. + */ + ForceConnectionCloseAtTransactionEnd(connection); + + /* + * The DROP DATABASE command should not propagate, so we disable DDL + * propagation. 
+ */ + List *commandList = list_make3( + "SET lock_timeout TO '1s'", + "SET citus.enable_ddl_propagation TO OFF;", + psprintf("DROP DATABASE IF EXISTS %s;", quote_identifier(databaseName)) + ); + + bool executeCommand = true; + + const char *commandString = NULL; + foreach_ptr(commandString, commandList) + { + /* + * Cannot use SendOptionalCommandListToWorkerOutsideTransactionWithConnection() + * because we don't want to open a transaction block on remote nodes as DROP + * DATABASE commands cannot be run inside a transaction block. + */ + if (ExecuteOptionalRemoteCommand(connection, commandString, NULL) != + RESPONSE_OKAY) + { + executeCommand = false; + break; + } + } + + CloseConnection(connection); + return executeCommand; +} + + /* * ErrorIfCleanupRecordForShardExists errors out if a cleanup record for the given * shard name exists. diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 9046c7309..52fcf7091 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -75,6 +75,15 @@ typedef struct DDLJob const char *metadataSyncCommand; List *taskList; /* worker DDL tasks to execute */ + + /* + * Only applicable when any of the tasks cannot be executed in a + * transaction block. + * + * Controls whether to emit a warning within the utility hook in case of a + * failure. + */ + bool warnForPartialFailure; } DDLJob; extern ProcessUtility_hook_type PrevProcessUtility; @@ -94,7 +103,8 @@ extern void ProcessUtilityParseTree(Node *node, const char *queryString, extern void MarkInvalidateForeignKeyGraph(void); extern void InvalidateForeignKeyGraphForDDL(void); extern List * DDLTaskList(Oid relationId, const char *commandString); -extern List * NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands); +extern List * NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, + bool warnForPartialFailure); extern List * NodeDDLTaskList(TargetWorkerSet targets, List *commands); extern bool AlterTableInProgress(void); extern bool DropSchemaOrDBInProgress(void); diff --git a/src/include/distributed/shard_cleaner.h b/src/include/distributed/shard_cleaner.h index 4967846b2..7609bd900 100644 --- a/src/include/distributed/shard_cleaner.h +++ b/src/include/distributed/shard_cleaner.h @@ -41,7 +41,8 @@ typedef enum CleanupObject CLEANUP_OBJECT_SUBSCRIPTION = 2, CLEANUP_OBJECT_REPLICATION_SLOT = 3, CLEANUP_OBJECT_PUBLICATION = 4, - CLEANUP_OBJECT_USER = 5 + CLEANUP_OBJECT_USER = 5, + CLEANUP_OBJECT_DATABASE = 6 } CleanupObject; /* diff --git a/src/test/regress/expected/alter_database_propagation.out b/src/test/regress/expected/alter_database_propagation.out index f01d39ab9..5c45a25e2 100644 --- a/src/test/regress/expected/alter_database_propagation.out +++ b/src/test/regress/expected/alter_database_propagation.out @@ -140,7 +140,12 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing ALTER DATABASE regression RESET lock_timeout DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx set citus.enable_create_database_propagation=on; +SET citus.next_operation_id TO 3000; create database "regression!'2"; +NOTICE: issuing ALTER DATABASE citus_temp_database_3000_0 RENAME TO "regression!'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_3000_0 RENAME TO "regression!'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx alter database 
"regression!'2" with CONNECTION LIMIT 100; NOTICE: issuing ALTER DATABASE "regression!'2" WITH CONNECTION LIMIT 100; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -189,7 +194,12 @@ alter DATABASE local_regression rename to local_regression2; drop database local_regression2; set citus.enable_create_database_propagation=on; drop database regression3; +SET citus.next_operation_id TO 3100; create database "regression!'4"; +NOTICE: issuing ALTER DATABASE citus_temp_database_3100_0 RENAME TO "regression!'4" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_3100_0 RENAME TO "regression!'4" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SELECT result FROM run_command_on_all_nodes( $$ ALTER TABLESPACE alter_db_tablespace RENAME TO "ts-needs\!escape" diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index da4ec4eb7..4ddbaae3f 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -427,11 +427,16 @@ SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER B --tests for special characters in database name set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; +set citus.grep_remote_commands = '%DATABASE%'; +SET citus.next_operation_id TO 2000; create database "mydatabase#1'2"; -NOTICE: issuing CREATE DATABASE "mydatabase#1'2" +NOTICE: issuing CREATE DATABASE citus_temp_database_2000_0 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE DATABASE "mydatabase#1'2" +NOTICE: issuing CREATE DATABASE citus_temp_database_2000_0 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_2000_0 RENAME TO "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_2000_0 RENAME TO "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx set citus.grep_remote_commands = '%DROP DATABASE%'; drop database if exists "mydatabase#1'2"; @@ -1264,6 +1269,95 @@ SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FRO DROP DATABASE no_createdb; DROP USER no_createdb; +-- Test a failure scenario by trying to create a distributed database that +-- already exists on one of the nodes. +\c - - - :worker_1_port +CREATE DATABASE "test_\!failure"; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+\c - - - :master_port +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure"; +ERROR: database "test_\!failure" already exists +CONTEXT: while executing command on localhost:xxxxx +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_\\!failure", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE "test_\!failure1"; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
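+-- Note: "test_\!failure1" now exists only on the coordinator, so repeating
+-- the distributed CREATE DATABASE from a worker is expected to fail on the
+-- coordinator, a remote node from that worker's point of view. A rough
+-- sketch of inspecting the cleanup bookkeeping involved, assuming the
+-- pg_dist_cleanup columns used elsewhere in this test suite:
+--   SELECT object_name FROM pg_dist_cleanup WHERE object_name LIKE 'citus_temp_database_%';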
+\c - - - :worker_1_port +DROP DATABASE "test_\!failure"; +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure1"; +ERROR: database "test_\!failure1" already exists +CONTEXT: while executing command on localhost:xxxxx +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "test_\\!failure1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :master_port +-- Before dropping local "test_\!failure1" database, test a failure scenario +-- by trying to create a distributed database that already exists "on local +-- node" this time. 
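+-- Note: since the conflict is on the local node this time, the error below
+-- is raised directly, without the "while executing command on localhost"
+-- CONTEXT line that the remote failures above produced.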
+SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure1"; +ERROR: database "test_\!failure1" already exists +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_\\!failure1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +DROP DATABASE "test_\!failure1"; SET citus.enable_create_database_propagation TO ON; --clean up resources created by this test -- DROP TABLESPACE is not supported, so we need to drop it manually. diff --git a/src/test/regress/expected/failure_create_database.out b/src/test/regress/expected/failure_create_database.out new file mode 100644 index 000000000..81fcd4519 --- /dev/null +++ b/src/test/regress/expected/failure_create_database.out @@ -0,0 +1,386 @@ +SET citus.enable_create_database_propagation TO ON; +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +CREATE FUNCTION get_temp_databases_on_nodes() +RETURNS TEXT AS $func$ + SELECT array_agg(DISTINCT result ORDER BY result) AS temp_databases_on_nodes FROM run_command_on_all_nodes($$SELECT datname FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$) WHERE result != ''; +$func$ +LANGUAGE sql; +CREATE FUNCTION count_db_cleanup_records() +RETURNS TABLE(object_name TEXT, count INTEGER) AS $func$ + SELECT object_name, COUNT(*) FROM pg_dist_cleanup WHERE object_name LIKE 'citus_temp_database_%' GROUP BY object_name; +$func$ +LANGUAGE sql; +CREATE FUNCTION ensure_no_temp_databases_on_any_nodes() +RETURNS BOOLEAN AS $func$ + SELECT bool_and(result::boolean) AS no_temp_databases_on_any_nodes FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +$func$ +LANGUAGE sql; +-- cleanup any orphaned resources from previous runs +CALL citus_cleanup_orphaned_resources(); +SET citus.next_operation_id TO 4000; +ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +SELECT citus.mitmproxy('conn.kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^CREATE DATABASE").cancel(' || pg_backend_pid() || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: canceling statement due to user request +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4000_0} +(1 row) + +SELECT * FROM 
count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4000_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^ALTER DATABASE").cancel(' || pg_backend_pid() || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: canceling statement due to user request +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4001_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4001_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + 
ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection not open +CONTEXT: while executing command on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4002_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4002_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +WARNING: connection not open +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +-- not call citus_cleanup_orphaned_resources() but recover the prepared transactions this time +SELECT 1 FROM recover_prepared_transactions(); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE db1; +-- after recovering the prepared transactions, cleanup records should also be removed +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT citus_internal.acquire_citus_advisory_object_class_lock").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, 
"stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onParse(query="^WITH distributed_object_data").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection not open +CONTEXT: while executing command on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4004_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4004_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +CREATE DATABASE db1; +-- show that a successful database creation doesn't leave any pg_dist_cleanup records behind +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +DROP DATABASE db1; +DROP FUNCTION get_temp_databases_on_nodes(); +DROP FUNCTION ensure_no_temp_databases_on_any_nodes(); +DROP FUNCTION count_db_cleanup_records(); +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/isolation_database_cmd_from_any_node.out b/src/test/regress/expected/isolation_database_cmd_from_any_node.out index 771c67fe8..e952bb457 100644 --- a/src/test/regress/expected/isolation_database_cmd_from_any_node.out +++ b/src/test/regress/expected/isolation_database_cmd_from_any_node.out @@ -1,6 +1,11 @@ Parsed test spec with 2 sessions starting permutation: s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock s1-commit s2-commit +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; @@ -18,8 +23,18 @@ acquire_citus_advisory_object_class_lock (1 row) step s2-commit: COMMIT; +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb1 s1-commit s2-commit s1-drop-testdb1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -39,8 +54,18 @@ acquire_citus_advisory_object_class_lock step s2-commit: COMMIT; step s1-drop-testdb1: DROP DATABASE testdb1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s1-drop-testdb1 s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; @@ -61,8 +86,18 @@ step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-drop-testdb1: DROP DATABASE testdb1; step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -81,8 +116,18 @@ acquire_citus_advisory_object_class_lock step s1-commit: COMMIT; step s2-commit: COMMIT; step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; @@ -90,8 +135,18 @@ step s1-create-db1: CREATE DATABASE db1; step s2-rollback: ROLLBACK; step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-user-dbuser s1-grant-on-testdb2-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-user-dbuser +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; @@ -100,8 +155,18 @@ step s1-grant-on-testdb2-to-dbuser: GRANT ALL ON DATABASE testdb2 TO dbuser; step s2-rollback: ROLLBACK; step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-user-dbuser: DROP USER dbuser; +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-testdb1 s1-create-user-dbuser s1-grant-on-testdb1-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-testdb1 s1-drop-user-dbuser +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; @@ -112,8 +177,18 @@ step s2-rollback: ROLLBACK; step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-testdb1: DROP DATABASE testdb1; step s1-drop-user-dbuser: DROP USER dbuser; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-commit s2-rollback s1-drop-db1 s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; @@ -126,8 +201,18 @@ ERROR: database "db1" already exists step s2-rollback: ROLLBACK; step s1-drop-db1: DROP DATABASE db1; step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-rollback s2-commit s1-drop-testdb1 s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s2-create-testdb2: CREATE DATABASE testdb2; step s1-begin: BEGIN; @@ -139,8 +224,18 @@ step s2-alter-testdb2-rename-to-db1: <... completed> step s2-commit: COMMIT; step s1-drop-testdb1: DROP DATABASE testdb1; step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-commit s2-rollback s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -151,8 +246,18 @@ step s2-alter-testdb1-rename-to-db1: <... completed> ERROR: database "testdb1" does not exist step s2-rollback: ROLLBACK; step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-rollback s2-commit s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s1-create-testdb1: CREATE DATABASE testdb1; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -162,8 +267,18 @@ step s1-rollback: ROLLBACK; step s2-alter-testdb1-rename-to-db1: <... completed> step s2-commit: COMMIT; step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; @@ -172,8 +287,18 @@ step s2-rollback: ROLLBACK; step s1-create-db1: <... completed> step s2-drop-testdb2: DROP DATABASE testdb2; step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-commit s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; @@ -182,8 +307,18 @@ step s2-commit: COMMIT; step s1-create-db1: <... completed> ERROR: database "db1" already exists step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db2 s1-create-db1 s2-commit s2-drop-db2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; @@ -192,16 +327,36 @@ step s2-commit: COMMIT; step s1-create-db1: <... completed> step s2-drop-db2: DROP DATABASE db2; step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-drop-testdb2 s2-rollback +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s2-begin: BEGIN; step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; step s1-drop-testdb2: DROP DATABASE testdb2; step s2-rollback: ROLLBACK; step s1-drop-testdb2: <... completed> +?column? +--------------------------------------------------------------------- + 1 +(1 row) + starting permutation: s2-create-testdb2 s1-create-db1 s2-begin s2-alter-testdb2-rename-to-db2 s1-drop-db1 s2-commit s2-drop-db2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + step s2-create-testdb2: CREATE DATABASE testdb2; step s1-create-db1: CREATE DATABASE db1; step s2-begin: BEGIN; @@ -209,3 +364,8 @@ step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; step s1-drop-db1: DROP DATABASE db1; step s2-commit: COMMIT; step s2-drop-db2: DROP DATABASE db2; +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/failure_schedule b/src/test/regress/failure_schedule index e1ad362b5..8b992422e 100644 --- a/src/test/regress/failure_schedule +++ b/src/test/regress/failure_schedule @@ -35,6 +35,7 @@ test: failure_mx_metadata_sync test: failure_mx_metadata_sync_multi_trans test: failure_connection_establishment test: failure_non_main_db_2pc +test: failure_create_database # this test syncs metadata to the workers test: failure_failover_to_local_execution diff --git a/src/test/regress/spec/isolation_database_cmd_from_any_node.spec b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec index 1e004cb33..8637a8942 100644 --- a/src/test/regress/spec/isolation_database_cmd_from_any_node.spec +++ b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec @@ -2,11 +2,15 @@ setup { -- OCLASS for database changed in PG 16 from 25 to 26 SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS value INTO oclass_database; + + SELECT 1 FROM citus_add_node('localhost', 57636, 0); } teardown { DROP TABLE IF EXISTS oclass_database; + + select 1 from citus_remove_node('localhost', 57636); } session "s1" diff --git a/src/test/regress/sql/alter_database_propagation.sql b/src/test/regress/sql/alter_database_propagation.sql index 4904919a6..9a8b1fab8 100644 --- a/src/test/regress/sql/alter_database_propagation.sql +++ b/src/test/regress/sql/alter_database_propagation.sql @@ -49,6 +49,7 @@ alter database regression set lock_timeout to DEFAULT; alter database regression RESET lock_timeout; set citus.enable_create_database_propagation=on; +SET citus.next_operation_id TO 3000; create database "regression!'2"; alter database "regression!'2" with CONNECTION LIMIT 100; alter database "regression!'2" with IS_TEMPLATE true CONNECTION LIMIT 50; @@ -90,6 +91,7 @@ set citus.enable_create_database_propagation=on; drop database regression3; +SET citus.next_operation_id TO 3100; create database "regression!'4"; diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index 329f48612..de55258c3 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -218,7 +218,8 @@ SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER B --tests for special characters in database name set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; +set citus.grep_remote_commands = '%DATABASE%'; +SET citus.next_operation_id TO 2000; create database "mydatabase#1'2"; @@ -746,6 +747,63 @@ SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FRO DROP DATABASE no_createdb; DROP USER no_createdb; +-- Test a failure scenario by trying to create a distributed database that +-- already exists on one of the nodes. 
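+-- The cleanup flow these scenarios rely on, in rough outline (both
+-- statements appear verbatim further below):
+--   CALL citus_cleanup_orphaned_resources();
+--   SELECT COUNT(*) = 0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%';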
+ +\c - - - :worker_1_port +CREATE DATABASE "test_\!failure"; + +\c - - - :master_port + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure$$) ORDER BY node_type, result; + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE "test_\!failure1"; + +\c - - - :worker_1_port +DROP DATABASE "test_\!failure"; + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure1"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + +\c - - - :master_port + +-- Before dropping local "test_\!failure1" database, test a failure scenario +-- by trying to create a distributed database that already exists "on local +-- node" this time. + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure1"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + +SET citus.enable_create_database_propagation TO OFF; + +DROP DATABASE "test_\!failure1"; + SET citus.enable_create_database_propagation TO ON; --clean up resources created by this test diff --git a/src/test/regress/sql/failure_create_database.sql b/src/test/regress/sql/failure_create_database.sql new file mode 100644 index 000000000..d117dc811 --- /dev/null +++ b/src/test/regress/sql/failure_create_database.sql @@ -0,0 +1,128 @@ +SET citus.enable_create_database_propagation TO ON; +SET client_min_messages TO WARNING; + +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + +CREATE FUNCTION get_temp_databases_on_nodes() +RETURNS TEXT AS $func$ + SELECT array_agg(DISTINCT result ORDER BY result) AS temp_databases_on_nodes FROM run_command_on_all_nodes($$SELECT datname FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$) WHERE result != ''; +$func$ +LANGUAGE sql; + +CREATE FUNCTION count_db_cleanup_records() +RETURNS TABLE(object_name TEXT, count INTEGER) AS $func$ + SELECT object_name, COUNT(*) FROM pg_dist_cleanup WHERE object_name LIKE 'citus_temp_database_%' GROUP BY object_name; +$func$ +LANGUAGE sql; + +CREATE FUNCTION ensure_no_temp_databases_on_any_nodes() +RETURNS BOOLEAN AS $func$ + SELECT bool_and(result::boolean) AS no_temp_databases_on_any_nodes FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +$func$ +LANGUAGE sql; + +-- cleanup any orphaned resources from previous runs +CALL citus_cleanup_orphaned_resources(); + +SET citus.next_operation_id TO 4000; + +ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +SELECT citus.mitmproxy('conn.kill()'); 
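+-- conn.kill() drops the proxied worker connection outright, so the CREATE
+-- DATABASE below is expected to fail before any temporary database is
+-- created on the remote nodes; accordingly, no temp databases or
+-- pg_dist_cleanup records should be observed afterwards.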
+CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^CREATE DATABASE").cancel(' || pg_backend_pid() || ')'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^ALTER DATABASE").cancel(' || pg_backend_pid() || ')'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +-- not call citus_cleanup_orphaned_resources() but recover the prepared transactions this time +SELECT 1 FROM recover_prepared_transactions(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +DROP DATABASE db1; + +-- after recovering the prepared transactions, cleanup records should also be removed +SELECT * FROM count_db_cleanup_records(); + +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT citus_internal.acquire_citus_advisory_object_class_lock").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onParse(query="^WITH distributed_object_data").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +CREATE DATABASE db1; + +-- show that a successful database creation doesn't leave any 
pg_dist_cleanup records behind
+SELECT * FROM count_db_cleanup_records();
+
+DROP DATABASE db1;
+
+DROP FUNCTION get_temp_databases_on_nodes();
+DROP FUNCTION ensure_no_temp_databases_on_any_nodes();
+DROP FUNCTION count_db_cleanup_records();
+
+SELECT 1 FROM citus_remove_node('localhost', :master_port);

From 51009d01913b9ddc1df426bd4df5bb22ecfe15fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Wed, 28 Feb 2024 11:58:28 +0300
Subject: [PATCH 09/14] Add support for alter/drop role propagation from non-main databases (#7461)

DESCRIPTION: Adds support for distributed `ALTER/DROP ROLE` commands from
the databases where Citus is not installed

---------

Co-authored-by: Onur Tirtir
---
 .../distributed/commands/utility_hook.c       | 212 +++++++++++++-----
 src/backend/distributed/metadata/distobject.c |  14 +-
 .../distributed/sql/citus--12.1-1--12.2-1.sql |   2 +
 .../sql/downgrades/citus--12.2-1--12.1-1.sql  |   3 +
 .../12.2-1.sql                                |   7 +
 .../latest.sql                                |   7 +-
 .../metadata_sync_from_non_maindb.out         | 143 ++++++++++++
 src/test/regress/expected/multi_extension.out |  62 ++---
 .../role_operations_from_non_maindb.out       | 138 ++++++++++++
 .../expected/upgrade_list_citus_objects.out   |   2 +-
 src/test/regress/multi_schedule               |   2 +-
 .../sql/metadata_sync_from_non_maindb.sql     | 105 ++++++++-
 .../sql/role_operations_from_non_maindb.sql   | 106 +++++++++
 13 files changed, 713 insertions(+), 90 deletions(-)
 create mode 100644 src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql
 create mode 100644 src/test/regress/expected/role_operations_from_non_maindb.out
 create mode 100644 src/test/regress/sql/role_operations_from_non_maindb.sql

diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index 7dff9cbf6..e264713dd 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -93,20 +93,40 @@
 	"SELECT citus_internal.start_management_transaction('%lu')"
 #define MARK_OBJECT_DISTRIBUTED \
 	"SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)"
+#define UNMARK_OBJECT_DISTRIBUTED \
+	"SELECT pg_catalog.citus_unmark_object_distributed(%d, %d, %d, %s)"
+
+/* see NonMainDbDistributedStatementInfo for the explanation of these flags */
+typedef enum DistObjectOperation
+{
+	NO_DIST_OBJECT_OPERATION,
+	MARK_DISTRIBUTED_GLOBALLY,
+	UNMARK_DISTRIBUTED_LOCALLY
+} DistObjectOperation;
+

 /*
  * NonMainDbDistributedStatementInfo is used to determine whether a statement is
- * supported from non-main databases and whether it should be marked as
- * distributed explicitly (*).
+ * supported from non-main databases and whether it should be marked or unmarked
+ * as distributed.
  *
- * (*) We always have to mark such objects as "distributed" but while for some
- * object types we can delegate this to main database, for some others we have
- * to explicitly send a command to all nodes in this code-path to achieve this.
+ * When creating a distributed object, we always have to mark it as
+ * "distributed". For some object types we can delegate this to the main
+ * database, but for others we have to explicitly send a command to all
+ * nodes from this code path. Callers need to provide
+ * MARK_DISTRIBUTED_GLOBALLY in the latter case.
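+ *
+ * For example, CREATE ROLE is handled this way: the newly created role has
+ * to be marked as distributed on all nodes from this code path, so its
+ * entry in the NonMainDbSupportedStatements array below uses
+ * MARK_DISTRIBUTED_GLOBALLY.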
+ * + * Similarly when dropping a distributed object, we always have to unmark such + * objects as "distributed" and our utility hook on remote nodes achieve this + * via UnmarkNodeWideObjectsDistributed() because the commands that we send to + * workers are executed via main db. However for the local node, this is not the + * case as we're not in the main db. For this reason, callers need to provide + * UNMARK_DISTRIBUTED_LOCALLY to unmark an object for local node as well. */ typedef struct NonMainDbDistributedStatementInfo { int statementType; - bool explicitlyMarkAsDistributed; + DistObjectOperation DistObjectOperation; /* * checkSupportedObjectTypes is a callback function that checks whether @@ -118,15 +138,16 @@ typedef struct NonMainDbDistributedStatementInfo } NonMainDbDistributedStatementInfo; /* - * MarkObjectDistributedParams is used to pass parameters to the - * MarkObjectDistributedFromNonMainDb function. + * DistObjectOperationParams is used to pass parameters to the + * MarkObjectDistributedGloballyFromNonMainDb function and + * UnMarkObjectDistributedLocallyFromNonMainDb functions. */ -typedef struct MarkObjectDistributedParams +typedef struct DistObjectOperationParams { char *name; Oid id; uint16 catalogRelId; -} MarkObjectDistributedParams; +} DistObjectOperationParams; bool EnableDDLPropagation = true; /* ddl propagation is enabled */ @@ -166,9 +187,11 @@ static bool IsCommandToCreateOrDropMainDB(Node *parsetree); static void RunPreprocessMainDBCommand(Node *parsetree); static void RunPostprocessMainDBCommand(Node *parsetree); static bool IsStatementSupportedFromNonMainDb(Node *parsetree); -static bool StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree); -static void MarkObjectDistributedFromNonMainDb(Node *parsetree); -static MarkObjectDistributedParams GetMarkObjectDistributedParams(Node *parsetree); +static bool StatementRequiresMarkDistributedGloballyFromNonMainDb(Node *parsetree); +static bool StatementRequiresUnmarkDistributedLocallyFromNonMainDb(Node *parsetree); +static void MarkObjectDistributedGloballyFromNonMainDb(Node *parsetree); +static void UnMarkObjectDistributedLocallyFromNonMainDb(List *unmarkDistributedList); +static List * GetDistObjectOperationParams(Node *parsetree); /* * checkSupportedObjectTypes callbacks for @@ -184,12 +207,15 @@ static bool NonMainDbCheckSupportedObjectTypeForSecLabel(Node *node); */ ObjectType supportedObjectTypesForGrantStmt[] = { OBJECT_DATABASE }; static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = { - { T_GrantRoleStmt, false, NULL }, - { T_CreateRoleStmt, true, NULL }, - { T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant }, - { T_CreatedbStmt, false, NULL }, - { T_DropdbStmt, false, NULL }, - { T_SecLabelStmt, false, NonMainDbCheckSupportedObjectTypeForSecLabel }, + { T_GrantRoleStmt, NO_DIST_OBJECT_OPERATION, NULL }, + { T_CreateRoleStmt, MARK_DISTRIBUTED_GLOBALLY, NULL }, + { T_DropRoleStmt, UNMARK_DISTRIBUTED_LOCALLY, NULL }, + { T_AlterRoleStmt, NO_DIST_OBJECT_OPERATION, NULL }, + { T_GrantStmt, NO_DIST_OBJECT_OPERATION, NonMainDbCheckSupportedObjectTypeForGrant }, + { T_CreatedbStmt, NO_DIST_OBJECT_OPERATION, NULL }, + { T_DropdbStmt, NO_DIST_OBJECT_OPERATION, NULL }, + { T_SecLabelStmt, NO_DIST_OBJECT_OPERATION, + NonMainDbCheckSupportedObjectTypeForSecLabel }, }; @@ -1743,12 +1769,19 @@ RunPreprocessMainDBCommand(Node *parsetree) START_MANAGEMENT_TRANSACTION, GetCurrentFullTransactionId().value); RunCitusMainDBQuery(mainDBQuery->data); + mainDBQuery = 
makeStringInfo(); appendStringInfo(mainDBQuery, EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, quote_literal_cstr(queryString), quote_literal_cstr(CurrentUserName())); RunCitusMainDBQuery(mainDBQuery->data); + + if (StatementRequiresUnmarkDistributedLocallyFromNonMainDb(parsetree)) + { + List *unmarkParams = GetDistObjectOperationParams(parsetree); + UnMarkObjectDistributedLocallyFromNonMainDb(unmarkParams); + } } @@ -1760,9 +1793,9 @@ static void RunPostprocessMainDBCommand(Node *parsetree) { if (IsStatementSupportedFromNonMainDb(parsetree) && - StatementRequiresMarkDistributedFromNonMainDb(parsetree)) + StatementRequiresMarkDistributedGloballyFromNonMainDb(parsetree)) { - MarkObjectDistributedFromNonMainDb(parsetree); + MarkObjectDistributedGloballyFromNonMainDb(parsetree); } } @@ -1793,11 +1826,11 @@ IsStatementSupportedFromNonMainDb(Node *parsetree) /* - * StatementRequiresMarkDistributedFromNonMainDb returns true if the statement should be marked + * StatementRequiresMarkDistributedGloballyFromNonMainDb returns true if the statement should be marked * as distributed when executed from a non-main database. */ static bool -StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree) +StatementRequiresMarkDistributedGloballyFromNonMainDb(Node *parsetree) { NodeTag type = nodeTag(parsetree); @@ -1806,7 +1839,8 @@ StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree) { if (type == NonMainDbSupportedStatements[i].statementType) { - return NonMainDbSupportedStatements[i].explicitlyMarkAsDistributed; + return NonMainDbSupportedStatements[i].DistObjectOperation == + MARK_DISTRIBUTED_GLOBALLY; } } @@ -1815,47 +1849,125 @@ StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree) /* - * MarkObjectDistributedFromNonMainDb marks the given object as distributed on the - * non-main database. + * StatementRequiresUnmarkDistributedLocallyFromNonMainDb returns true if the statement should be unmarked + * as distributed when executed from a non-main database. */ -static void -MarkObjectDistributedFromNonMainDb(Node *parsetree) +static bool +StatementRequiresUnmarkDistributedLocallyFromNonMainDb(Node *parsetree) { - MarkObjectDistributedParams markObjectDistributedParams = - GetMarkObjectDistributedParams(parsetree); - StringInfo mainDBQuery = makeStringInfo(); - appendStringInfo(mainDBQuery, - MARK_OBJECT_DISTRIBUTED, - markObjectDistributedParams.catalogRelId, - quote_literal_cstr(markObjectDistributedParams.name), - markObjectDistributedParams.id, - quote_literal_cstr(CurrentUserName())); - RunCitusMainDBQuery(mainDBQuery->data); + NodeTag type = nodeTag(parsetree); + + for (int i = 0; i < sizeof(NonMainDbSupportedStatements) / + sizeof(NonMainDbSupportedStatements[0]); i++) + { + if (type == NonMainDbSupportedStatements[i].statementType) + { + return NonMainDbSupportedStatements[i].DistObjectOperation == + UNMARK_DISTRIBUTED_LOCALLY; + } + } + + return false; } /* - * GetMarkObjectDistributedParams returns MarkObjectDistributedParams for the target + * MarkObjectDistributedGloballyFromNonMainDb marks the given object as distributed on the + * non-main database. 
+ */ +static void +MarkObjectDistributedGloballyFromNonMainDb(Node *parsetree) +{ + List *distObjectOperationParams = + GetDistObjectOperationParams(parsetree); + + DistObjectOperationParams *distObjectOperationParam = NULL; + + foreach_ptr(distObjectOperationParam, distObjectOperationParams) + { + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + MARK_OBJECT_DISTRIBUTED, + distObjectOperationParam->catalogRelId, + quote_literal_cstr(distObjectOperationParam->name), + distObjectOperationParam->id, + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); + } +} + + +/* + * UnMarkObjectDistributedLocallyFromNonMainDb unmarks the given object as distributed on the + * non-main database. + */ +static void +UnMarkObjectDistributedLocallyFromNonMainDb(List *markObjectDistributedParamList) +{ + DistObjectOperationParams *markObjectDistributedParam = NULL; + int subObjectId = 0; + char *checkObjectExistence = "false"; + foreach_ptr(markObjectDistributedParam, markObjectDistributedParamList) + { + StringInfo query = makeStringInfo(); + appendStringInfo(query, + UNMARK_OBJECT_DISTRIBUTED, + AuthIdRelationId, + markObjectDistributedParam->id, + subObjectId, checkObjectExistence); + RunCitusMainDBQuery(query->data); + } +} + + +/* + * GetDistObjectOperationParams returns DistObjectOperationParams for the target * object of given parsetree. */ -static MarkObjectDistributedParams -GetMarkObjectDistributedParams(Node *parsetree) +List * +GetDistObjectOperationParams(Node *parsetree) { + List *paramsList = NIL; if (IsA(parsetree, CreateRoleStmt)) { CreateRoleStmt *stmt = castNode(CreateRoleStmt, parsetree); - MarkObjectDistributedParams info = { - .name = stmt->role, - .catalogRelId = AuthIdRelationId, - .id = get_role_oid(stmt->role, false) - }; + DistObjectOperationParams *params = + (DistObjectOperationParams *) palloc(sizeof(DistObjectOperationParams)); + params->name = stmt->role; + params->catalogRelId = AuthIdRelationId; + params->id = get_role_oid(stmt->role, false); - return info; + paramsList = lappend(paramsList, params); + } + else if (IsA(parsetree, DropRoleStmt)) + { + DropRoleStmt *stmt = castNode(DropRoleStmt, parsetree); + RoleSpec *roleSpec; + foreach_ptr(roleSpec, stmt->roles) + { + DistObjectOperationParams *params = (DistObjectOperationParams *) palloc( + sizeof(DistObjectOperationParams)); + + Oid roleOid = get_role_oid(roleSpec->rolename, true); + + if (roleOid == InvalidOid) + { + continue; + } + + params->id = roleOid; + params->name = roleSpec->rolename; + params->catalogRelId = AuthIdRelationId; + + paramsList = lappend(paramsList, params); + } + } + else + { + elog(ERROR, "unsupported statement type"); } - /* Add else if branches for other statement types */ - - elog(ERROR, "unsupported statement type"); + return paramsList; } diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 007d07bdc..ff5b2c7a9 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -98,10 +98,10 @@ mark_object_distributed(PG_FUNCTION_ARGS) /* - * citus_unmark_object_distributed(classid oid, objid oid, objsubid int) + * citus_unmark_object_distributed(classid oid, objid oid, objsubid int,checkobjectexistence bool) * - * removes the entry for an object address from pg_dist_object. Only removes the entry if - * the object does not exist anymore. + * Removes the entry for an object address from pg_dist_object. 
If checkobjectexistence is true,
+ * throws an error if the object still exists.
  */
 Datum
 citus_unmark_object_distributed(PG_FUNCTION_ARGS)
@@ -109,6 +109,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
 	Oid classid = PG_GETARG_OID(0);
 	Oid objid = PG_GETARG_OID(1);
 	int32 objsubid = PG_GETARG_INT32(2);
+	bool checkObjectExistence = true;
+	if (!PG_ARGISNULL(3))
+	{
+		checkObjectExistence = PG_GETARG_BOOL(3);
+	}
+

 	ObjectAddress address = { 0 };
 	ObjectAddressSubSet(address, classid, objid, objsubid);
@@ -119,7 +125,7 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS)
 		PG_RETURN_VOID();
 	}
 
-	if (ObjectExists(&address))
+	if (checkObjectExistence && ObjectExists(&address))
 	{
 		ereport(ERROR, (errmsg("object still exists"),
 						errdetail("the %s \"%s\" still exists",
diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql
index 68823b3be..2d5f88676 100644
--- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql
+++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql
@@ -7,6 +7,8 @@
 #include "udfs/start_management_transaction/12.2-1.sql"
 #include "udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql"
 #include "udfs/mark_object_distributed/12.2-1.sql"
+DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid, oid, int);
+#include "udfs/citus_unmark_object_distributed/12.2-1.sql"
 #include "udfs/commit_management_command_2pc/12.2-1.sql"
 
 ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8;
diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql
index 5b2828cfe..581c65ea8 100644
--- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql
+++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql
@@ -18,6 +18,9 @@ DROP FUNCTION citus_internal.mark_object_distributed(
     classId Oid, objectName text, objectId Oid, connectionUser text
 );
 
+DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid, oid, int, boolean);
+#include "../udfs/citus_unmark_object_distributed/10.0-1.sql"
+
 DROP FUNCTION citus_internal.commit_management_command_2pc();
 
 ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid;
diff --git a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql
new file mode 100644
index 000000000..3c1b1bdec
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql
@@ -0,0 +1,7 @@
+CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean DEFAULT true)
+    RETURNS void
+    LANGUAGE C STRICT
+    AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$;
+COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean)
+    IS 'Removes an object from citus.pg_dist_object after deletion. If checkobjectexistence is true, an object existence check is performed.'
+       ' Otherwise, the object existence check is skipped.';
diff --git a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql
index 3f60c60c3..3c1b1bdec 100644
--- a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql
+++ b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql
@@ -1,6 +1,7 @@
-CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
+CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean DEFAULT true)
     RETURNS void
     LANGUAGE C STRICT
     AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$;
-COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int)
-    IS 'remove an object address from citus.pg_dist_object once the object has been deleted';
+COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean)
+    IS 'Removes an object from citus.pg_dist_object after deletion. If checkobjectexistence is true, an object existence check is performed.'
+       ' Otherwise, the object existence check is skipped.';
diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out
index 91ca1c82d..6630b39bd 100644
--- a/src/test/regress/expected/metadata_sync_from_non_maindb.out
+++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out
@@ -164,6 +164,149 @@ revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2";
 revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1";
 \c regression
 drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5;
+--test for user operations
+--test for create user
+\c regression - - :master_port
+select 1 from citus_remove_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+        1
+(1 row)
+
+\c metadata_sync_2pc_db - - :master_port
+CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1';
+\c metadata_sync_2pc_db - - :worker_1_port
+CREATE USER "test_role2-needs\!escape"
+WITH
+  SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION
+LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1;
+create role test_role3;
+\c regression - - :master_port
+select 1 from citus_add_node('localhost', :worker_2_port);
+ ?column?
+--------------------------------------------------------------------- + 1 +(1 row) + +-- XXX: date is not correct on one of the workers due to https://github.com/citusdata/citus/issues/7533 +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":"infinity"},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":"infinity"}] +(3 rows) + +--test for alter user +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; +\c metadata_sync_2pc_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- XXX: date is not correct on one of the workers due to https://github.com/citusdata/citus/issues/7533 +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":"infinity"},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":"infinity"}] +(3 rows) + +--test for drop user +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :worker_1_port +DROP ROLE test_role1, "test_role2-needs\!escape"; +\c metadata_sync_2pc_db - - :master_port +DROP ROLE test_role3; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- XXX: date is not correct on one of the workers due to https://github.com/citusdata/citus/issues/7533 +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":"infinity"},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":"infinity"}] +(3 rows) + +-- Clean up: drop the database on worker node 2 +\c regression - - :worker_2_port +DROP ROLE if exists test_role1, "test_role2-needs\!escape", test_role3; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + set citus.enable_create_database_propagation to on; drop database metadata_sync_2pc_db; drop schema metadata_sync_2pc_schema; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 0aecd652f..aaafce715 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1420,37 +1420,39 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 12.2-1 ALTER EXTENSION citus UPDATE TO '12.2-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object + previous_object | current_object --------------------------------------------------------------------- - | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void - | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void - | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void - | function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void - | function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) void - | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void - | function citus_internal.add_tenant_schema(oid,integer) void - | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void - | function 
citus_internal.commit_management_command_2pc() void - | function citus_internal.database_command(text) void - | function citus_internal.delete_colocation_metadata(integer) void - | function citus_internal.delete_partition_metadata(regclass) void - | function citus_internal.delete_placement_metadata(bigint) void - | function citus_internal.delete_shard_metadata(bigint) void - | function citus_internal.delete_tenant_schema(oid) void - | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void - | function citus_internal.global_blocked_processes() SETOF record - | function citus_internal.is_replication_origin_tracking_active() boolean - | function citus_internal.local_blocked_processes() SETOF record - | function citus_internal.mark_node_not_synced(integer,integer) void - | function citus_internal.mark_object_distributed(oid,text,oid,text) void - | function citus_internal.start_management_transaction(xid8) void - | function citus_internal.start_replication_origin_tracking() void - | function citus_internal.stop_replication_origin_tracking() void - | function citus_internal.unregister_tenant_schema_globally(oid,text) void - | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void - | function citus_internal.update_placement_metadata(bigint,integer,integer) void - | function citus_internal.update_relation_colocation(oid,integer) void -(28 rows) + function citus_unmark_object_distributed(oid,oid,integer) void | + | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void + | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void + | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void + | function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void + | function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) void + | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void + | function citus_internal.add_tenant_schema(oid,integer) void + | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void + | function citus_internal.commit_management_command_2pc() void + | function citus_internal.database_command(text) void + | function citus_internal.delete_colocation_metadata(integer) void + | function citus_internal.delete_partition_metadata(regclass) void + | function citus_internal.delete_placement_metadata(bigint) void + | function citus_internal.delete_shard_metadata(bigint) void + | function citus_internal.delete_tenant_schema(oid) void + | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void + | function citus_internal.global_blocked_processes() SETOF record + | function citus_internal.is_replication_origin_tracking_active() boolean + | function citus_internal.local_blocked_processes() SETOF record + | function citus_internal.mark_node_not_synced(integer,integer) void + | function citus_internal.mark_object_distributed(oid,text,oid,text) void + | function citus_internal.start_management_transaction(xid8) void + | function citus_internal.start_replication_origin_tracking() void + | function citus_internal.stop_replication_origin_tracking() void + | function citus_internal.unregister_tenant_schema_globally(oid,text) void + | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void + | function citus_internal.update_placement_metadata(bigint,integer,integer) void + | function 
citus_internal.update_relation_colocation(oid,integer) void + | function citus_unmark_object_distributed(oid,oid,integer,boolean) void +(30 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/role_operations_from_non_maindb.out b/src/test/regress/expected/role_operations_from_non_maindb.out new file mode 100644 index 000000000..3b51c89b0 --- /dev/null +++ b/src/test/regress/expected/role_operations_from_non_maindb.out @@ -0,0 +1,138 @@ +-- Create a new database +set citus.enable_create_database_propagation to on; +CREATE DATABASE role_operations_test_db; +SET citus.superuser TO 'postgres'; +-- Connect to the new database +\c role_operations_test_db +-- Test CREATE ROLE with various options +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; +\c role_operations_test_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] +(3 rows) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_dist_object d + JOIN pg_roles r ON d.objid = r.oid + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape') + order by r.rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] +(3 rows) + 
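+-- Aside (illustrative only, not part of the recorded test output): the
+-- pg_dist_object entries verified above are exactly what the widened
+-- citus_unmark_object_distributed() UDF from earlier in this series removes.
+-- A hypothetical direct call that skips the object-existence check via the
+-- new fourth argument would look like:
+--
+--   SELECT pg_catalog.citus_unmark_object_distributed(
+--            'pg_authid'::regclass, 'test_role1'::regrole, 0, false);
+--
+-- 'pg_authid'::regclass resolves to classid 1260, the literal used by the
+-- leaked-record check at the end of this file.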
+\c role_operations_test_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; +\c role_operations_test_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] +(3 rows) + +\c role_operations_test_db - - :master_port +-- Test DROP ROLE +DROP ROLE no_such_role; -- fails nicely +ERROR: role "no_such_role" does not exist +DROP ROLE IF EXISTS no_such_role; -- doesn't fail +NOTICE: role "no_such_role" does not exist, skipping +CREATE ROLE new_role; +DROP ROLE IF EXISTS no_such_role, new_role; -- doesn't fail +NOTICE: role "no_such_role" does not exist, skipping +DROP ROLE IF EXISTS test_role1, "test_role2-needs\!escape"; +\c regression - - :master_port +--verify that roles and dist_object are dropped +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_roles r + WHERE r.rolname IN ('test_role1', 
'test_role2-needs\!escape','new_role','no_such_role') + order by r.rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$ + SELECT count(*) leaked_pg_dist_object_records_for_roles + FROM pg_dist_object LEFT JOIN pg_authid ON (objid = oid) + WHERE classid = 1260 AND oid IS NULL +$$); + result +--------------------------------------------------------------------- + 0 + 0 + 0 +(3 rows) + +-- Clean up: drop the database +set citus.enable_create_database_propagation to on; +DROP DATABASE role_operations_test_db; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 4f17695be..ca31b222b 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -174,7 +174,7 @@ ORDER BY 1; function citus_text_send_as_jsonb(text) function citus_total_relation_size(regclass,boolean) function citus_truncate_trigger() - function citus_unmark_object_distributed(oid,oid,integer) + function citus_unmark_object_distributed(oid,oid,integer,boolean) function citus_update_node(integer,text,integer,boolean,integer) function citus_update_shard_statistics(bigint) function citus_update_table_statistics(regclass) diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 85de7b8b8..3fec50aac 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -108,7 +108,7 @@ test: object_propagation_debug test: undistribute_table test: run_command_on_all_nodes test: background_task_queue_monitor -test: other_databases grant_role_from_non_maindb seclabel_non_maindb +test: other_databases grant_role_from_non_maindb role_operations_from_non_maindb seclabel_non_maindb test: citus_internal_access # Causal clock test diff --git a/src/test/regress/sql/metadata_sync_from_non_maindb.sql b/src/test/regress/sql/metadata_sync_from_non_maindb.sql index 93445be27..67e1e98d1 100644 --- a/src/test/regress/sql/metadata_sync_from_non_maindb.sql +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -80,9 +80,112 @@ revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; \c regression drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +--test for user operations + +--test for create user +\c regression - - :master_port +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db - - :master_port +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; + +\c metadata_sync_2pc_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; + +create role test_role3; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +-- XXX: date is not correct on one of the workers due to https://github.com/citusdata/citus/issues/7533 +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 
'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +--test for alter user +select 1 from citus_remove_node('localhost', :worker_2_port); +\c metadata_sync_2pc_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; + +\c metadata_sync_2pc_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +-- XXX: date is not correct on one of the workers due to https://github.com/citusdata/citus/issues/7533 +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +--test for drop user +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db - - :worker_1_port +DROP ROLE test_role1, "test_role2-needs\!escape"; + +\c metadata_sync_2pc_db - - :master_port +DROP ROLE test_role3; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +-- XXX: date is not correct on one of the workers due to https://github.com/citusdata/citus/issues/7533 +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +-- Clean up: drop the database on worker node 2 +\c regression - - :worker_2_port +DROP ROLE if exists test_role1, "test_role2-needs\!escape", test_role3; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + set citus.enable_create_database_propagation to on; drop database metadata_sync_2pc_db; drop schema metadata_sync_2pc_schema; - reset citus.enable_create_database_propagation; reset search_path; diff --git a/src/test/regress/sql/role_operations_from_non_maindb.sql b/src/test/regress/sql/role_operations_from_non_maindb.sql new file mode 100644 index 000000000..5f569208b --- /dev/null +++ b/src/test/regress/sql/role_operations_from_non_maindb.sql @@ -0,0 +1,106 @@ +-- Create a new database +set citus.enable_create_database_propagation to on; +CREATE DATABASE role_operations_test_db; +SET citus.superuser TO 'postgres'; +-- Connect to the new database +\c role_operations_test_db +-- Test CREATE ROLE with various options +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; + +\c role_operations_test_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION 
BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_dist_object d + JOIN pg_roles r ON d.objid = r.oid + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape') + order by r.rolname + ) t +$$); + +\c role_operations_test_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; + +\c role_operations_test_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; + +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + +\c role_operations_test_db - - :master_port +-- Test DROP ROLE +DROP ROLE no_such_role; -- fails nicely +DROP ROLE IF EXISTS no_such_role; -- doesn't fail + +CREATE ROLE new_role; +DROP ROLE IF EXISTS no_such_role, new_role; -- doesn't fail +DROP ROLE IF EXISTS test_role1, "test_role2-needs\!escape"; + +\c regression - - :master_port +--verify that roles and dist_object are dropped +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + ORDER BY rolname + ) t +$$); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_roles r + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + order by r.rolname + ) t +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT count(*) leaked_pg_dist_object_records_for_roles + FROM pg_dist_object LEFT JOIN pg_authid ON (objid = oid) + WHERE classid = 1260 AND oid IS NULL +$$); + +-- Clean up: drop the database +set citus.enable_create_database_propagation to on; +DROP DATABASE role_operations_test_db; +reset citus.enable_create_database_propagation; From d59c93bc504ad32621d66583de6b65f936b0ed13 Mon Sep 17 00:00:00 2001 From: sminux Date: Tue, 5 Mar 2024 10:49:35 +0300 Subject: [PATCH 10/14] fix bad copy-paste rightComparisonLimit (#7547) DESCRIPTION: change for #7543 --- src/backend/columnar/columnar_tableam.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 40486d08f..ca3a5f4c4 100644 --- 
a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -2946,7 +2946,7 @@ MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion) } else { - rightComparisionLimit = strlen(leftVersion); + rightComparisionLimit = strlen(rightVersion); } /* we can error out early if hypens are not in the same position */ From edcdbe67b11c6ec5023dd18e537f3b876f51187d Mon Sep 17 00:00:00 2001 From: eaydingol <60466783+eaydingol@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:46:49 +0300 Subject: [PATCH 11/14] Fix: store the previous shard cost for order verification (#7550) Store the previous shard cost so that the invariant checking performs as expected. --- src/backend/distributed/operations/shard_rebalancer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index d1868d3c4..03dc4c1b8 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -384,6 +384,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state) Assert(shardCost->cost <= prevShardCost->cost); } totalCost += shardCost->cost; + prevShardCost = shardCost; } /* Check that utilization field is up to date. */ From f0043b64a17382e340bed5c61ec0025d0af34379 Mon Sep 17 00:00:00 2001 From: Karina <55838532+Green-Chan@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:08:19 +0300 Subject: [PATCH 12/14] Fix server crash when trying to execute activate_node_snapshot() on a single-node cluster (#7552) This fixes #7551 reported by Egor Chindyaskin. Function activate_node_snapshot() is not meant to be called on a cluster without worker nodes. This commit adds an ERROR report for such a case to prevent a server crash. --- src/backend/distributed/test/metadata_sync.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c index dec20c772..ce025cff9 100644 --- a/src/backend/distributed/test/metadata_sync.c +++ b/src/backend/distributed/test/metadata_sync.c @@ -50,6 +50,13 @@ activate_node_snapshot(PG_FUNCTION_ARGS) * so we are using first primary worker node just for test purposes. */ WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode(); + if (dummyWorkerNode == NULL) + { + ereport(ERROR, (errmsg("no worker nodes found"), + errdetail("Function activate_node_snapshot is meant to be " + "used when running tests on a multi-node cluster " + "with workers."))); + } /* * Create MetadataSyncContext which is used throughout nodes' activation. From 12f56438fc85a676a79b1a2886cc69ab872c9d14 Mon Sep 17 00:00:00 2001 From: copetol <40788226+copetol@users.noreply.github.com> Date: Fri, 8 Mar 2024 16:21:42 +0300 Subject: [PATCH 13/14] Fix segfault when using certain DO block in function (#7554) When using a CASE WHEN expression in the body of the function that is used in the DO block, a segmentation fault occurred. This fixes that.
Fixes #7381 --------- Co-authored-by: Konstantin Morozov --- .../planner/function_call_delegation.c | 4 +++ .../expected/function_with_case_when.out | 32 +++++++++++++++++++ src/test/regress/multi_schedule | 1 + .../regress/sql/function_with_case_when.sql | 27 ++++++++++++++++ 4 files changed, 64 insertions(+) create mode 100644 src/test/regress/expected/function_with_case_when.out create mode 100644 src/test/regress/sql/function_with_case_when.sql diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index f17b02347..4a79dc25a 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -91,6 +91,10 @@ bool InDelegatedFunctionCall = false; static bool contain_param_walker(Node *node, void *context) { + if (node == NULL) + { + return false; + } if (IsA(node, Param)) { Param *paramNode = (Param *) node; diff --git a/src/test/regress/expected/function_with_case_when.out b/src/test/regress/expected/function_with_case_when.out new file mode 100644 index 000000000..18df5be0a --- /dev/null +++ b/src/test/regress/expected/function_with_case_when.out @@ -0,0 +1,32 @@ +CREATE SCHEMA function_with_case; +SET search_path TO function_with_case; +-- create function +CREATE OR REPLACE FUNCTION test_err(v1 text) + RETURNS text + LANGUAGE plpgsql + SECURITY DEFINER +AS $function$ + +begin + return v1 || ' - ok'; +END; +$function$; +do $$ declare + lNewValues text; + val text; +begin + val = 'test'; + lNewValues = test_err(v1 => case when val::text = 'test'::text then 'yes' else 'no' end); + raise notice 'lNewValues= %', lNewValues; +end;$$ ; +NOTICE: lNewValues= yes - ok +CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE +-- call function +SELECT test_err('test'); + test_err +--------------------------------------------------------------------- + test - ok +(1 row) + +DROP SCHEMA function_with_case CASCADE; +NOTICE: drop cascades to function test_err(text) diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 3fec50aac..67a6e23a8 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -110,6 +110,7 @@ test: run_command_on_all_nodes test: background_task_queue_monitor test: other_databases grant_role_from_non_maindb role_operations_from_non_maindb seclabel_non_maindb test: citus_internal_access +test: function_with_case_when # Causal clock test test: clock diff --git a/src/test/regress/sql/function_with_case_when.sql b/src/test/regress/sql/function_with_case_when.sql new file mode 100644 index 000000000..03c6678e4 --- /dev/null +++ b/src/test/regress/sql/function_with_case_when.sql @@ -0,0 +1,27 @@ +CREATE SCHEMA function_with_case; +SET search_path TO function_with_case; + +-- create function +CREATE OR REPLACE FUNCTION test_err(v1 text) + RETURNS text + LANGUAGE plpgsql + SECURITY DEFINER +AS $function$ + +begin + return v1 || ' - ok'; +END; +$function$; +do $$ declare + lNewValues text; + val text; +begin + val = 'test'; + lNewValues = test_err(v1 => case when val::text = 'test'::text then 'yes' else 'no' end); + raise notice 'lNewValues= %', lNewValues; +end;$$ ; + +-- call function +SELECT test_err('test'); + +DROP SCHEMA function_with_case CASCADE; From 8afa2d0386ff303be29060b709f88994b9648b6c Mon Sep 17 00:00:00 2001 From: eaydingol <60466783+eaydingol@users.noreply.github.com> Date: Sun, 10 Mar 2024 10:20:08 +0300 Subject: [PATCH 14/14] Change the order in which the locks 
are acquired (#7542) This PR changes the order in which the locks are acquired (for the target and reference tables), when a modify request is initiated from a worker node that is not the "FirstWorkerNode". To prevent concurrent writes, locks are acquired on the first worker node for the replicated tables. When the update statement originates from the first worker node, it acquires the lock on the reference table(s) first, followed by the target table(s). However, if the update statement is initiated in another worker node, the lock requests are sent to the first worker in a different order. This PR unifies the modification order on the first worker node. With the third commit, independent of the node that received the request, the locks are acquired for the modified table and then the reference tables on the first node. The first commit shows a sample output for the test prior to the fix. Fixes #7477 --------- Co-authored-by: Jelte Fennema-Nio --- src/backend/distributed/utils/resource_lock.c | 28 ++++++--- src/test/regress/expected/issue_7477.out | 62 +++++++++++++++++++ src/test/regress/multi_schedule | 2 +- src/test/regress/sql/issue_7477.sql | 44 +++++++++++++ 4 files changed, 127 insertions(+), 9 deletions(-) create mode 100644 src/test/regress/expected/issue_7477.out create mode 100644 src/test/regress/sql/issue_7477.sql diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 13e88a16e..8ac269e43 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -707,13 +707,27 @@ SerializeNonCommutativeWrites(List *shardIntervalList, LOCKMODE lockMode) } List *replicatedShardList = NIL; - if (AnyTableReplicated(shardIntervalList, &replicatedShardList)) - { - if (ClusterHasKnownMetadataWorkers() && !IsFirstWorkerNode()) - { - LockShardListResourcesOnFirstWorker(lockMode, replicatedShardList); - } + bool anyTableReplicated = AnyTableReplicated(shardIntervalList, &replicatedShardList); + /* + * Acquire locks on the modified table. + * If the table is replicated, the locks are first acquired on the first worker node then locally. + * But if we're already on the first worker, acquiring on the first worker node and locally are the same operation. + * So we only acquire locally in that case. + */ + if (anyTableReplicated && ClusterHasKnownMetadataWorkers() && !IsFirstWorkerNode()) + { + LockShardListResourcesOnFirstWorker(lockMode, replicatedShardList); + } + LockShardListResources(shardIntervalList, lockMode); + + /* + * Next, acquire locks on the reference tables that are referenced by a foreign key if there are any. + * Note that LockReferencedReferenceShardResources() first acquires locks on the first worker, + * then locally. + */ + if (anyTableReplicated) + { ShardInterval *firstShardInterval = (ShardInterval *) linitial(replicatedShardList); if (ReferenceTableShardId(firstShardInterval->shardId)) @@ -728,8 +742,6 @@ SerializeNonCommutativeWrites(List *shardIntervalList, LOCKMODE lockMode) LockReferencedReferenceShardResources(firstShardInterval->shardId, lockMode); } } - - LockShardListResources(shardIntervalList, lockMode); } diff --git a/src/test/regress/expected/issue_7477.out b/src/test/regress/expected/issue_7477.out new file mode 100644 index 000000000..224d85c6e --- /dev/null +++ b/src/test/regress/expected/issue_7477.out @@ -0,0 +1,62 @@ +--- Test for updating a table that has a foreign key reference to another reference table. 
+--- Issue #7477: Distributed deadlock after issuing a simple UPDATE statement +--- https://github.com/citusdata/citus/issues/7477 +CREATE TABLE table1 (id INT PRIMARY KEY); +SELECT create_reference_table('table1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table1 VALUES (1); +CREATE TABLE table2 ( + id INT, + info TEXT, + CONSTRAINT table1_id_fk FOREIGN KEY (id) REFERENCES table1 (id) + ); +SELECT create_reference_table('table2'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table2 VALUES (1, 'test'); +--- Runs the update command in parallel on workers. +--- Due to bug #7477, before the fix, the result is non-deterministic +--- and have several rows of the form: +--- localhost | 57638 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: canceling the transaction since it was involved in a distributed deadlock +SELECT * FROM master_run_on_worker( + ARRAY['localhost', 'localhost','localhost', 'localhost','localhost', + 'localhost','localhost', 'localhost','localhost', 'localhost']::text[], + ARRAY[57638, 57637, 57637, 57638, 57637, 57638, 57637, 57638, 57638, 57637]::int[], + ARRAY['UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1' + ]::text[], + true); + node_name | node_port | success | result +--------------------------------------------------------------------- + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 +(10 rows) + +--- cleanup +DROP TABLE table2; +DROP TABLE table1; diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 67a6e23a8..af5921e60 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -103,7 +103,7 @@ test: multi_dropped_column_aliases foreign_key_restriction_enforcement test: binary_protocol test: alter_table_set_access_method test: alter_distributed_table -test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 +test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477 test: object_propagation_debug test: undistribute_table test: run_command_on_all_nodes diff --git a/src/test/regress/sql/issue_7477.sql b/src/test/regress/sql/issue_7477.sql new file mode 100644 index 000000000..b9c1578e9 --- /dev/null +++ b/src/test/regress/sql/issue_7477.sql @@ -0,0 +1,44 @@ + +--- Test for updating a table that has a foreign key reference to another reference table. 
+--- Issue #7477: Distributed deadlock after issuing a simple UPDATE statement +--- https://github.com/citusdata/citus/issues/7477 + +CREATE TABLE table1 (id INT PRIMARY KEY); +SELECT create_reference_table('table1'); +INSERT INTO table1 VALUES (1); + +CREATE TABLE table2 ( + id INT, + info TEXT, + CONSTRAINT table1_id_fk FOREIGN KEY (id) REFERENCES table1 (id) + ); +SELECT create_reference_table('table2'); +INSERT INTO table2 VALUES (1, 'test'); + +--- Runs the update command in parallel on workers. +--- Due to bug #7477, before the fix, the result is non-deterministic +--- and have several rows of the form: +--- localhost | 57638 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: canceling the transaction since it was involved in a distributed deadlock + +SELECT * FROM master_run_on_worker( + ARRAY['localhost', 'localhost','localhost', 'localhost','localhost', + 'localhost','localhost', 'localhost','localhost', 'localhost']::text[], + ARRAY[57638, 57637, 57637, 57638, 57637, 57638, 57637, 57638, 57638, 57637]::int[], + ARRAY['UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1' + ]::text[], + true); + +--- cleanup +DROP TABLE table2; +DROP TABLE table1;
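
To make the unified lock order above concrete, here is a minimal sketch of the interleaving that could deadlock before this commit (it reuses the table1/table2 schema from issue_7477.sql; the session-to-worker placement is illustrative, not taken from the patch):

-- Session A, connected to the first worker node:
BEGIN;
UPDATE table2 SET info = 'a' WHERE id = 1;
-- before the fix, A acquired the lock on the referenced reference table
-- (table1) first, then on the modified table (table2)
COMMIT;

-- Session B, connected to another worker node, running concurrently with A:
BEGIN;
UPDATE table2 SET info = 'b' WHERE id = 1;
-- before the fix, B's lock requests reached the first worker in the
-- opposite order (table2 first, then table1), so A and B could each hold
-- one lock while waiting on the other: a distributed deadlock;
-- after the fix, both sessions lock table2 before table1 on the first
-- worker node, and the later UPDATE simply blocks until the earlier one
-- commits
COMMIT;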