From 3a6fdada111c1eb70d1f1d594d70af6a667cf601 Mon Sep 17 00:00:00 2001 From: gindibay Date: Sat, 14 Oct 2023 05:35:34 +0300 Subject: [PATCH 01/60] Changes if to switch statements --- .../distributed/deparser/citus_deparseutils.c | 62 ++++++++++--------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/src/backend/distributed/deparser/citus_deparseutils.c b/src/backend/distributed/deparser/citus_deparseutils.c index 52d96930e..f96244a27 100644 --- a/src/backend/distributed/deparser/citus_deparseutils.c +++ b/src/backend/distributed/deparser/citus_deparseutils.c @@ -31,36 +31,38 @@ optionToStatement(StringInfo buf, DefElem *option, const struct { if (strcmp(name, opt_formats[i].name) == 0) { - if (opt_formats[i].type == OPTION_FORMAT_STRING) - { - char *value = defGetString(option); - appendStringInfo(buf, opt_formats[i].format, quote_identifier(value)); - } - else if (opt_formats[i].type == OPTION_FORMAT_INTEGER) - { - int32 value = defGetInt32(option); - appendStringInfo(buf, opt_formats[i].format, value); - } - else if (opt_formats[i].type == OPTION_FORMAT_BOOLEAN) - { - bool value = defGetBoolean(option); - appendStringInfo(buf, opt_formats[i].format, value ? 
"true" : "false"); - } -#if PG_VERSION_NUM >= PG_VERSION_15 - else if (opt_formats[i].type == OPTION_FORMAT_OBJECT_ID) - { - Oid value = defGetObjectId(option); - appendStringInfo(buf, opt_formats[i].format, value); - } -#endif - else if (opt_formats[i].type == OPTION_FORMAT_LITERAL_CSTR) - { - char *value = defGetString(option); - appendStringInfo(buf, opt_formats[i].format, quote_literal_cstr(value)); - } - else - { - elog(ERROR, "unrecognized option type: %d", opt_formats[i].type); + switch (opt_formats[i].type) { + case OPTION_FORMAT_STRING: { + char *value = defGetString(option); + appendStringInfo(buf, opt_formats[i].format, quote_identifier(value)); + break; + } + case OPTION_FORMAT_INTEGER: { + int32 value = defGetInt32(option); + appendStringInfo(buf, opt_formats[i].format, value); + break; + } + case OPTION_FORMAT_BOOLEAN: { + bool value = defGetBoolean(option); + appendStringInfo(buf, opt_formats[i].format, value ? "true" : "false"); + break; + } + #if PG_VERSION_NUM >= PG_VERSION_15 + case OPTION_FORMAT_OBJECT_ID: { + Oid value = defGetObjectId(option); + appendStringInfo(buf, opt_formats[i].format, value); + break; + } + #endif + case OPTION_FORMAT_LITERAL_CSTR: { + char *value = defGetString(option); + appendStringInfo(buf, opt_formats[i].format, quote_literal_cstr(value)); + break; + } + default: { + elog(ERROR, "unrecognized option type: %d", opt_formats[i].type); + break; + } } break; } From 674fd3226c293e94c3fe56ac0739a2b7011a843b Mon Sep 17 00:00:00 2001 From: gindibay Date: Sat, 14 Oct 2023 19:50:32 +0300 Subject: [PATCH 02/60] Sets enable_create_database_propagation true --- src/backend/distributed/shared_library_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 00cdb0027..886517464 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -1268,7 +1268,7 @@ 
RegisterCitusConfigVariables(void) "and DROP DATABASE statements to workers"), NULL, &EnableCreateDatabasePropagation, - false, + true, PGC_USERSET, GUC_STANDARD, NULL, NULL, NULL); From c6d1ef9e41a25255bc8ca082680cd58f6866d5f0 Mon Sep 17 00:00:00 2001 From: gindibay Date: Sat, 14 Oct 2023 20:19:18 +0300 Subject: [PATCH 03/60] Rollbacks enable_create_database_propagation --- .../distributed/deparser/citus_deparseutils.c | 33 +++++++++++++------ src/backend/distributed/shared_library_init.c | 2 +- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/backend/distributed/deparser/citus_deparseutils.c b/src/backend/distributed/deparser/citus_deparseutils.c index f96244a27..6492c14f2 100644 --- a/src/backend/distributed/deparser/citus_deparseutils.c +++ b/src/backend/distributed/deparser/citus_deparseutils.c @@ -31,40 +31,53 @@ optionToStatement(StringInfo buf, DefElem *option, const struct { if (strcmp(name, opt_formats[i].name) == 0) { - switch (opt_formats[i].type) { - case OPTION_FORMAT_STRING: { + switch (opt_formats[i].type) + { + case OPTION_FORMAT_STRING: + { char *value = defGetString(option); appendStringInfo(buf, opt_formats[i].format, quote_identifier(value)); break; } - case OPTION_FORMAT_INTEGER: { + + case OPTION_FORMAT_INTEGER: + { int32 value = defGetInt32(option); appendStringInfo(buf, opt_formats[i].format, value); break; } - case OPTION_FORMAT_BOOLEAN: { + + case OPTION_FORMAT_BOOLEAN: + { bool value = defGetBoolean(option); - appendStringInfo(buf, opt_formats[i].format, value ? "true" : "false"); + appendStringInfo(buf, opt_formats[i].format, value ? 
"true" : + "false"); break; } + #if PG_VERSION_NUM >= PG_VERSION_15 - case OPTION_FORMAT_OBJECT_ID: { + case OPTION_FORMAT_OBJECT_ID: + { Oid value = defGetObjectId(option); appendStringInfo(buf, opt_formats[i].format, value); break; } + #endif - case OPTION_FORMAT_LITERAL_CSTR: { + case OPTION_FORMAT_LITERAL_CSTR: + { char *value = defGetString(option); - appendStringInfo(buf, opt_formats[i].format, quote_literal_cstr(value)); + appendStringInfo(buf, opt_formats[i].format, quote_literal_cstr( + value)); break; } - default: { + + default: + { elog(ERROR, "unrecognized option type: %d", opt_formats[i].type); break; } } - break; } } } diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 886517464..00cdb0027 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -1268,7 +1268,7 @@ RegisterCitusConfigVariables(void) "and DROP DATABASE statements to workers"), NULL, &EnableCreateDatabasePropagation, - true, + false, PGC_USERSET, GUC_STANDARD, NULL, NULL, NULL); From a497a782399eae23178dd038a5a7686823c359a1 Mon Sep 17 00:00:00 2001 From: gindibay Date: Sat, 14 Oct 2023 20:39:39 +0300 Subject: [PATCH 04/60] Fixes review comments --- .../regress/expected/create_drop_database_propagation.out | 6 +++--- src/test/regress/sql/create_drop_database_propagation.sql | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 37829a6ee..32552d01a 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -21,15 +21,15 @@ CREATE DATABASE mydatabase IS_TEMPLATE = false; SELECT pd.datname, pd.encoding, pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.datacl, rolname AS database_owner, +pd.datcollate , pd. datctype , pd.datacl, pa.rolname AS database_owner, pt.spcname AS tablespace FROM pg_database pd JOIN pg_authid pa ON pd.datdba = pa.oid join pg_tablespace pt on pd.dattablespace = pt.oid WHERE datname = 'mydatabase'; - datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | database_owner | tablespace + datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | tablespace --------------------------------------------------------------------- - mydatabase | 6 | f | t | 10 | C | C | | create_drop_db_test_user | create_drop_db_test_user | create_drop_db_tablespace + mydatabase | 6 | f | t | 10 | C | C | | create_drop_db_test_user | create_drop_db_tablespace (1 row) \c - - - :worker_1_port diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index d84654054..d21956595 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -29,7 +29,7 @@ CREATE DATABASE mydatabase SELECT pd.datname, pd.encoding, pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, rolname AS database_owner, +pd.datcollate , pd. 
datctype , pd.datacl, pa.rolname AS database_owner, pt.spcname AS tablespace FROM pg_database pd JOIN pg_authid pa ON pd.datdba = pa.oid From 1e91b144e2ca5a16cbc1d66f6b10a1b1ced05ac4 Mon Sep 17 00:00:00 2001 From: gindibay Date: Sat, 14 Oct 2023 20:49:26 +0300 Subject: [PATCH 05/60] Fixed review issues --- .../distributed/sql/citus--12.1-1--12.2-1.sql | 14 +------------- .../citus_internal_database_command/12.2-1.sql | 10 ++++++++++ .../citus_internal_database_command/latest.sql | 10 ++++++++++ 3 files changed, 21 insertions(+), 13 deletions(-) create mode 100644 src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index 5d869f40e..578a182ef 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -1,16 +1,4 @@ -- citus--12.1-1--12.2-1 - --- --- citus_internal_database_command creates a database according to the given command. 
- -CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) - RETURNS void - LANGUAGE C - STRICT -AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; -COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS - 'run a database command without transaction block restrictions'; - -- bump version to 12.2-1 - +#include "udfs/citus_internal_database_command/12.2-1.sql" #include "udfs/citus_add_rebalance_strategy/12.2-1.sql" diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql new file mode 100644 index 000000000..232e3ad14 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql @@ -0,0 +1,10 @@ +-- +-- citus_internal_database_command creates a database according to the given command. + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) + RETURNS void + LANGUAGE C + STRICT +AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS + 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql new file mode 100644 index 000000000..232e3ad14 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql @@ -0,0 +1,10 @@ +-- +-- citus_internal_database_command creates a database according to the given command. 
+ +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) + RETURNS void + LANGUAGE C + STRICT +AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS + 'run a database command without transaction block restrictions'; From 71a4633dad767154668975e4482c08d493b685cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?= Date: Tue, 17 Oct 2023 16:39:37 +0300 Subject: [PATCH 06/60] Fixes typo and renames multi_process_utility (#7259) --- src/backend/distributed/commands/index.c | 6 +-- .../distributed/commands/utility_hook.c | 40 +++++++++---------- src/backend/distributed/commands/vacuum.c | 4 +- .../executor/executor_util_tasks.c | 4 +- src/backend/distributed/shared_library_init.c | 2 +- .../distributed/utils/citus_copyfuncs.c | 2 +- .../distributed/utils/citus_outfuncs.c | 2 +- .../distributed/commands/utility_hook.h | 2 +- .../distributed/multi_physical_planner.h | 2 +- 9 files changed, 32 insertions(+), 32 deletions(-) diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 8271cc4f4..275f253b3 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -938,7 +938,7 @@ CreateIndexTaskList(IndexStmt *indexStmt) task->dependentTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = indexStmt->concurrent; + task->cannotBeExecutedInTransaction = indexStmt->concurrent; taskList = lappend(taskList, task); @@ -983,7 +983,7 @@ CreateReindexTaskList(Oid relationId, ReindexStmt *reindexStmt) task->dependentTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = + task->cannotBeExecutedInTransaction = IsReindexWithParam_compat(reindexStmt, "concurrently"); taskList = lappend(taskList, 
task); @@ -1309,7 +1309,7 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) task->dependentTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = dropStmt->concurrent; + task->cannotBeExecutedInTransaction = dropStmt->concurrent; taskList = lappend(taskList, task); diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index cf8e0644e..579b6979e 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -95,13 +95,13 @@ int UtilityHookLevel = 0; /* Local functions forward declarations for helper functions */ -static void ProcessUtilityInternal(PlannedStmt *pstmt, - const char *queryString, - ProcessUtilityContext context, - ParamListInfo params, - struct QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *completionTag); +static void citus_ProcessUtilityInternal(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + struct QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *completionTag); static void set_indexsafe_procflags(void); static char * CurrentSearchPath(void); static void IncrementUtilityHookCountersIfNecessary(Node *parsetree); @@ -130,7 +130,7 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte /* - * multi_ProcessUtility is the main entry hook for implementing Citus-specific + * citus_ProcessUtility is the main entry hook for implementing Citus-specific * utility behavior. 
Its primary responsibilities are intercepting COPY and DDL * commands and augmenting the coordinator's command with corresponding tasks * to be run on worker nodes, after suitably ensuring said commands' options @@ -139,7 +139,7 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte * TRUNCATE and VACUUM are also supported. */ void -multi_ProcessUtility(PlannedStmt *pstmt, +citus_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, @@ -329,8 +329,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, PG_TRY(); { - ProcessUtilityInternal(pstmt, queryString, context, params, queryEnv, dest, - completionTag); + citus_ProcessUtilityInternal(pstmt, queryString, context, params, queryEnv, dest, + completionTag); if (UtilityHookLevel == 1) { @@ -404,7 +404,7 @@ multi_ProcessUtility(PlannedStmt *pstmt, /* - * ProcessUtilityInternal is a helper function for multi_ProcessUtility where majority + * citus_ProcessUtilityInternal is a helper function for citus_ProcessUtility where majority * of the Citus specific utility statements are handled here. The distinction between * both functions is that Citus_ProcessUtility does not handle CALL and DO statements. * The reason for the distinction is implemented to be able to find the "top-level" DDL @@ -412,13 +412,13 @@ multi_ProcessUtility(PlannedStmt *pstmt, * this goal. 
*/ static void -ProcessUtilityInternal(PlannedStmt *pstmt, - const char *queryString, - ProcessUtilityContext context, - ParamListInfo params, - struct QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *completionTag) +citus_ProcessUtilityInternal(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + struct QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *completionTag) { Node *parsetree = pstmt->utilityStmt; List *ddlJobs = NIL; @@ -1386,7 +1386,7 @@ PostStandardProcessUtility(Node *parsetree) * on the local table first. However, in order to decide whether the * command leads to an invalidation, we need to check before the command * is being executed since we read pg_constraint table. Thus, we maintain a - * local flag and do the invalidation after multi_ProcessUtility, + * local flag and do the invalidation after citus_ProcessUtility, * before ExecuteDistributedDDLJob(). */ InvalidateForeignKeyGraphForDDL(); diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index ee03aeae1..21638ba7f 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -279,7 +279,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum task->replicationModel = REPLICATION_MODEL_INVALID; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = ((vacuumParams.options) & VACOPT_VACUUM); + task->cannotBeExecutedInTransaction = ((vacuumParams.options) & VACOPT_VACUUM); taskList = lappend(taskList, task); } @@ -719,7 +719,7 @@ ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumPa SetTaskQueryStringList(task, unqualifiedVacuumCommands); task->dependentTaskList = NULL; task->replicationModel = REPLICATION_MODEL_INVALID; - task->cannotBeExecutedInTransction = ((vacuumParams.options) & 
VACOPT_VACUUM); + task->cannotBeExecutedInTransaction = ((vacuumParams.options) & VACOPT_VACUUM); bool hasPeerWorker = false; diff --git a/src/backend/distributed/executor/executor_util_tasks.c b/src/backend/distributed/executor/executor_util_tasks.c index abf721196..483fd55a7 100644 --- a/src/backend/distributed/executor/executor_util_tasks.c +++ b/src/backend/distributed/executor/executor_util_tasks.c @@ -61,7 +61,7 @@ TaskListRequiresRollback(List *taskList) } Task *task = (Task *) linitial(taskList); - if (task->cannotBeExecutedInTransction) + if (task->cannotBeExecutedInTransaction) { /* vacuum, create index concurrently etc. */ return false; @@ -164,7 +164,7 @@ TaskListCannotBeExecutedInTransaction(List *taskList) Task *task = NULL; foreach_ptr(task, taskList) { - if (task->cannotBeExecutedInTransction) + if (task->cannotBeExecutedInTransaction) { return true; } diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index e5d593295..1ac20c8bc 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -543,7 +543,7 @@ _PG_init(void) */ PrevProcessUtility = (ProcessUtility_hook != NULL) ? ProcessUtility_hook : standard_ProcessUtility; - ProcessUtility_hook = multi_ProcessUtility; + ProcessUtility_hook = citus_ProcessUtility; /* * Acquire symbols for columnar functions that citus calls. 
diff --git a/src/backend/distributed/utils/citus_copyfuncs.c b/src/backend/distributed/utils/citus_copyfuncs.c index 7e1379ef3..fe4429f04 100644 --- a/src/backend/distributed/utils/citus_copyfuncs.c +++ b/src/backend/distributed/utils/citus_copyfuncs.c @@ -326,7 +326,7 @@ CopyNodeTask(COPYFUNC_ARGS) COPY_STRING_FIELD(fetchedExplainAnalyzePlan); COPY_SCALAR_FIELD(fetchedExplainAnalyzeExecutionDuration); COPY_SCALAR_FIELD(isLocalTableModification); - COPY_SCALAR_FIELD(cannotBeExecutedInTransction); + COPY_SCALAR_FIELD(cannotBeExecutedInTransaction); } diff --git a/src/backend/distributed/utils/citus_outfuncs.c b/src/backend/distributed/utils/citus_outfuncs.c index b4062751a..9b4ac809c 100644 --- a/src/backend/distributed/utils/citus_outfuncs.c +++ b/src/backend/distributed/utils/citus_outfuncs.c @@ -535,7 +535,7 @@ OutTask(OUTFUNC_ARGS) WRITE_STRING_FIELD(fetchedExplainAnalyzePlan); WRITE_FLOAT_FIELD(fetchedExplainAnalyzeExecutionDuration, "%.2f"); WRITE_BOOL_FIELD(isLocalTableModification); - WRITE_BOOL_FIELD(cannotBeExecutedInTransction); + WRITE_BOOL_FIELD(cannotBeExecutedInTransaction); } diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index f02f83fe3..34b2945ac 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -78,7 +78,7 @@ typedef struct DDLJob extern ProcessUtility_hook_type PrevProcessUtility; -extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, +extern void citus_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, diff --git a/src/include/distributed/multi_physical_planner.h b/src/include/distributed/multi_physical_planner.h index b7acc0574..35d83eb33 100644 --- a/src/include/distributed/multi_physical_planner.h +++ b/src/include/distributed/multi_physical_planner.h @@ 
-329,7 +329,7 @@ typedef struct Task /* * Vacuum, create/drop/reindex concurrently cannot be executed in a transaction. */ - bool cannotBeExecutedInTransction; + bool cannotBeExecutedInTransaction; Const *partitionKeyValue; int colocationId; From db13afaa7b77777003b7a724a01fdb0f5df481d1 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 17 Oct 2023 16:58:17 +0300 Subject: [PATCH 07/60] Fix flaky columnar_create.sql test (#7266) --- src/test/regress/expected/columnar_create.out | 43 +++++++++---------- src/test/regress/sql/columnar_create.sql | 32 +++++++++----- 2 files changed, 43 insertions(+), 32 deletions(-) diff --git a/src/test/regress/expected/columnar_create.out b/src/test/regress/expected/columnar_create.out index 73b891177..a134fd063 100644 --- a/src/test/regress/expected/columnar_create.out +++ b/src/test/regress/expected/columnar_create.out @@ -178,32 +178,31 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_ CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,5) i; -SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id -FROM pg_class WHERE relname='columnar_temp' \gset -SELECT pg_backend_pid() AS val INTO old_backend_pid; +SELECT columnar.get_storage_id(oid) as oid INTO columnar_temp_storage_id +FROM pg_class WHERE relname='columnar_temp'; \c - - - :master_port SET search_path TO columnar_create; --- wait until old backend to expire to make sure that temp table cleanup is complete -SELECT columnar_test_helpers.pg_waitpid(val) FROM old_backend_pid; - pg_waitpid ---------------------------------------------------------------------- +-- wait until temporary table and its metadata is removed +DO $$ +DECLARE + loop_wait_count integer := 0; +BEGIN + WHILE ( + (SELECT COUNT(*) > 0 FROM pg_class WHERE relname='columnar_temp') OR + (SELECT columnar_test_helpers.columnar_metadata_has_storage_id(oid) FROM 
columnar_temp_storage_id) + ) + LOOP + IF loop_wait_count > 1000 THEN + RAISE EXCEPTION 'Timeout while waiting for temporary table to be dropped'; + END IF; -(1 row) - -DROP TABLE old_backend_pid; --- show that temporary table itself and its metadata is removed -SELECT COUNT(*)=0 FROM pg_class WHERE relname='columnar_temp'; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_storage_id); - columnar_metadata_has_storage_id ---------------------------------------------------------------------- - f -(1 row) + PERFORM pg_sleep(0.001); + loop_wait_count := loop_wait_count + 1; + END LOOP; +END; +$$ language plpgsql; +DROP TABLE columnar_temp_storage_id; -- connect to another session and create a temp table with same name CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe diff --git a/src/test/regress/sql/columnar_create.sql b/src/test/regress/sql/columnar_create.sql index 408ce126e..a0708aeac 100644 --- a/src/test/regress/sql/columnar_create.sql +++ b/src/test/regress/sql/columnar_create.sql @@ -136,22 +136,34 @@ CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,5) i; -SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id -FROM pg_class WHERE relname='columnar_temp' \gset - -SELECT pg_backend_pid() AS val INTO old_backend_pid; +SELECT columnar.get_storage_id(oid) as oid INTO columnar_temp_storage_id +FROM pg_class WHERE relname='columnar_temp'; \c - - - :master_port SET search_path TO columnar_create; --- wait until old backend to expire to make sure that temp table cleanup is complete -SELECT columnar_test_helpers.pg_waitpid(val) FROM old_backend_pid; +-- wait until temporary table and its metadata is removed +DO $$ +DECLARE + loop_wait_count integer := 0; +BEGIN + WHILE ( + (SELECT COUNT(*) 
> 0 FROM pg_class WHERE relname='columnar_temp') OR + (SELECT columnar_test_helpers.columnar_metadata_has_storage_id(oid) FROM columnar_temp_storage_id) + ) + LOOP + IF loop_wait_count > 1000 THEN + RAISE EXCEPTION 'Timeout while waiting for temporary table to be dropped'; + END IF; -DROP TABLE old_backend_pid; + PERFORM pg_sleep(0.001); --- show that temporary table itself and its metadata is removed -SELECT COUNT(*)=0 FROM pg_class WHERE relname='columnar_temp'; -SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_storage_id); + loop_wait_count := loop_wait_count + 1; + END LOOP; +END; +$$ language plpgsql; + +DROP TABLE columnar_temp_storage_id; -- connect to another session and create a temp table with same name CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; From 2d1444188c9021d8a424b638e9aa77343f28cf9c Mon Sep 17 00:00:00 2001 From: zhjwpku Date: Wed, 18 Oct 2023 16:53:00 +0800 Subject: [PATCH 08/60] Fix wrong comments around HasDistributionKey() (#7223) HasDistributionKey & HasDistributionKeyCacheEntry returns true when the corresponding table has a distribution key, the comments state the opposite, which should be fixed. Signed-off-by: Zhao Junwang Co-authored-by: Onur Tirtir --- src/backend/distributed/metadata/metadata_cache.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 55d0f11c5..85a945308 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -521,8 +521,7 @@ IsCitusTableTypeCacheEntry(CitusTableCacheEntry *tableEntry, CitusTableType tabl /* - * HasDistributionKey returs true if given Citus table doesn't have a - * distribution key. + * HasDistributionKey returns true if given Citus table has a distribution key. 
*/ bool HasDistributionKey(Oid relationId) @@ -538,8 +537,8 @@ HasDistributionKey(Oid relationId) /* - * HasDistributionKey returs true if given cache entry identifies a Citus - * table that doesn't have a distribution key. + * HasDistributionKeyCacheEntry returns true if given cache entry identifies a + * Citus table that has a distribution key. */ bool HasDistributionKeyCacheEntry(CitusTableCacheEntry *tableEntry) From dad81042a5996d54199e5b73a550857049cb2f3d Mon Sep 17 00:00:00 2001 From: gindibay Date: Mon, 23 Oct 2023 10:01:18 +0300 Subject: [PATCH 09/60] Adds undistribute sql --- src/backend/distributed/commands/database.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 8aee9213f..7080eeba8 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -240,6 +240,8 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, return NIL; } + + AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node); EnsureCoordinator(); @@ -351,6 +353,13 @@ citus_internal_database_command(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +static char * GetUnmarkDatabaseDistributedSql(char* dbName){ + StringInfoData pg_dist_object_delete = { 0 }; + initStringInfo(&pg_dist_object_delete); + appendStringInfo(&pg_dist_object_delete, "delete from pg_dist_object where " + "object_id in (select oid from pg_database where datname = '%s')",dbName); + return pg_dist_object_delete.data; +} List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, @@ -373,6 +382,8 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, return NIL; } + + ObjectAddress dbAddress = { 0 }; ObjectAddressSet(dbAddress, DatabaseRelationId, databaseOid); if (!IsObjectDistributed(&dbAddress)) @@ -381,6 +392,7 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, } 
UnmarkObjectDistributed(&dbAddress); + char *unmarkDatabaseDistributedSql = GetUnmarkDatabaseDistributedSql(stmt->dbname); char *dropDatabaseCommand = DeparseTreeNode(node); @@ -390,7 +402,8 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, quote_literal_cstr(dropDatabaseCommand)); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, + List *commands = list_make4(DISABLE_DDL_PROPAGATION, + unmarkDatabaseDistributedSql, (void *) internalDropCommand->data, ENABLE_DDL_PROPAGATION); From ea0a908702da5d35f7d03b6f7378a1b1ee87ff28 Mon Sep 17 00:00:00 2001 From: gindibay Date: Mon, 23 Oct 2023 12:31:25 +0300 Subject: [PATCH 10/60] Fixes pg_dist_objects record in coordinators --- src/backend/distributed/commands/database.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 7080eeba8..444745b4c 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -357,7 +357,7 @@ static char * GetUnmarkDatabaseDistributedSql(char* dbName){ StringInfoData pg_dist_object_delete = { 0 }; initStringInfo(&pg_dist_object_delete); appendStringInfo(&pg_dist_object_delete, "delete from pg_dist_object where " - "object_id in (select oid from pg_database where datname = '%s')",dbName); + "objid in (select oid from pg_database where datname = '%s')",dbName); return pg_dist_object_delete.data; } From 690276c51621c15c6ecc2a25dfd184e663d7633b Mon Sep 17 00:00:00 2001 From: gindibay Date: Mon, 23 Oct 2023 12:35:53 +0300 Subject: [PATCH 11/60] Fixes indentation --- src/backend/distributed/commands/database.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 444745b4c..88aea7abb 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -241,7 +241,6 @@ 
PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, } - AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node); EnsureCoordinator(); @@ -353,14 +352,19 @@ citus_internal_database_command(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -static char * GetUnmarkDatabaseDistributedSql(char* dbName){ + +static char * +GetUnmarkDatabaseDistributedSql(char *dbName) +{ StringInfoData pg_dist_object_delete = { 0 }; initStringInfo(&pg_dist_object_delete); appendStringInfo(&pg_dist_object_delete, "delete from pg_dist_object where " - "objid in (select oid from pg_database where datname = '%s')",dbName); + "objid in (select oid from pg_database where datname = '%s')", + dbName); return pg_dist_object_delete.data; } + List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) @@ -383,7 +387,6 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, } - ObjectAddress dbAddress = { 0 }; ObjectAddressSet(dbAddress, DatabaseRelationId, databaseOid); if (!IsObjectDistributed(&dbAddress)) From 1fe16fa7464033b16064b87be067ed30ccd90d02 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Mon, 23 Oct 2023 13:01:48 +0300 Subject: [PATCH 12/60] Remove unnecessary pre-fastpath code (#7262) This code was here because we first implemented `fast path planner` via [#2606](https://github.com/citusdata/citus/pull/2606) and then later `deferred pruning` [#3369](https://github.com/citusdata/citus/pull/3369) So, for some years, this code was useful. 
--- .../planner/multi_router_planner.c | 22 +++---------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 0d7a0de78..e70de5bbd 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -2324,27 +2324,11 @@ PlanRouterQuery(Query *originalQuery, TargetShardIntervalForFastPathQuery(originalQuery, &isMultiShardQuery, distributionKeyValue, partitionValueConst); - - /* - * This could only happen when there is a parameter on the distribution key. - * We defer error here, later the planner is forced to use a generic plan - * by assigning arbitrarily high cost to the plan. - */ - if (UpdateOrDeleteOrMergeQuery(originalQuery) && isMultiShardQuery) - { - planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, - "Router planner cannot handle multi-shard " - "modify queries", NULL, NULL); - return planningError; - } + Assert(!isMultiShardQuery); *prunedShardIntervalListList = shardIntervalList; - - if (!isMultiShardQuery) - { - ereport(DEBUG2, (errmsg("Distributed planning for a fast-path router " - "query"))); - } + ereport(DEBUG2, (errmsg("Distributed planning for a fast-path router " + "query"))); } else { From 10198b18e84e78478b009c3da8ecbb89aaedb474 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Mon, 23 Oct 2023 13:43:43 +0300 Subject: [PATCH 13/60] Technical readme small fixes (#7261) --- src/backend/distributed/README.md | 33 ++++++++++--------- .../planner/fast_path_router_planner.c | 2 +- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md index 0a3164e0f..7c4f43add 100644 --- a/src/backend/distributed/README.md +++ b/src/backend/distributed/README.md @@ -245,6 +245,7 @@ CREATE TABLE country_codes ( country_code VARCHAR(3) 
PRIMARY KEY, country_name VARCHAR(50) ); +SELECT create_reference_table('country_codes'); -- Reference Table: Order Status CREATE TABLE order_status ( @@ -269,14 +270,17 @@ The aim of this planner is to avoid relying on PostgreSQL's standard_planner() f ### Main C Functions Involved: -- `FastPathRouterPlan()`: The primary function for creating the fast-path query plan. +- `FastPathPlanner()`: The primary function for creating the fast-path query plan. - `FastPathRouterQuery()`: Validates if a query is eligible for fast-path routing by checking its structure and the WHERE clause. With set client_min_messages to debug4; you should see the following in the DEBUG messages: "DEBUG: Distributed planning for a fast-path router query" ```sql -- Fetches the count of users born in the same year, but only --- for a single country +-- for a single country, with a filter on the distribution column +-- Normally we have a single user with id = 15 because it's a PRIMARY KEY +-- this is just to demonstrate that fast-path can handle complex queries +-- with EXTRACT(), COUNT(), GROUP BY, HAVING, etc. SELECT EXTRACT(YEAR FROM date_of_birth) as birth_year, COUNT(*) FROM users_table WHERE country_code = 'USA' AND user_id = 15 @@ -382,11 +386,10 @@ FROM users_table u, orders_table o WHERE u.user_id = o.user_id AND u.user_id = 42; -- With Subqueries: - -- Fetch the username and their total order amount -- for a specific user SELECT u.username, - (SELECT MAX(o.product_id) FROM orders_table o + (SELECT COUNT(*) FROM orders_table o WHERE o.user_id = 42 AND o.user_id = u.user_id) FROM users_table u @@ -692,7 +695,7 @@ Assume that there are two subqueries; each subquery is individually joined on th -- The join condition between them is: sub1.user_id != sub2.user_id, which does not preserve distribution key equality. -- Citus qualifies sub1 as the anchor subquery and checks whether all other subqueries are joined on the distribution key. 
-- In this case, sub2 is not joined on the distribution key, so Citus decides to recursively plan the whole sub2. -SELECT a.user_id, b.user_id +SELECT sub1.user_id, sub2.user_id FROM ( SELECT u.user_id FROM users_table u @@ -884,7 +887,7 @@ Citus has a rules-based optimizer. The core function `MultiLogicalPlanCreate()` For instance, one simple optimization pushes the "filter" operation below the "MultiCollect." Such rules are defined in the function `Commutative()` in `multi_logical_optimizer.c`. -The most interesting part of the optimizer is usually in the final stage, when handling the more complex operators (GROUP BY, DISTINCT window functions, ORDER BY, aggregates). These operators are conjoined in a `MultiExtendedOpNode`. In many cases, they can only partially be pushed down into the worker nodes, which results in one `MultiExtendedOpNode` above the `MultiCollection` (which will run on the coordinator and aggregates across worker nodes), and another `MultiExtendedOpNode` below the `MultiCollect` (which will be pushed down to worker nodes). The bulk of the logic for generating the two nodes lives in `MasterExtendedOpNode()` and `WorkerExtendedOpNode()`, respectively. +The most interesting part of the optimizer is usually in the final stage, when handling the more complex operators (GROUP BY, DISTINCT window functions, ORDER BY, aggregates). These operators are conjoined in a `MultiExtendedOpNode`. In many cases, they can only partially be pushed down into the worker nodes, which results in one `MultiExtendedOpNode` above the `MultiCollect` (which will run on the coordinator and aggregates across worker nodes), and another `MultiExtendedOpNode` below the `MultiCollect` (which will be pushed down to worker nodes). The bulk of the logic for generating the two nodes lives in `MasterExtendedOpNode()` and `WorkerExtendedOpNode()`, respectively. 
##### Aggregate functions @@ -1034,8 +1037,8 @@ SELECT * FROM cte_1; -- but as the same cte used twice -- Citus converts the CTE to intermediate result WITH cte_1 AS (SELECT DISTINCT user_id FROM orders_table) -SELECT * FROM cte_1 as c1 JOIN - cte_1 as c2 USING (user_id); +SELECT * FROM cte_1 as c1 + JOIN cte_1 as c2 USING (user_id); ``` - **Citus Specific Materialization**: @@ -1051,8 +1054,7 @@ As of writing this document, Citus does NOT support ```sql WITH users_that_have_orders AS (SELECT users_table.* FROM users_table JOIN orders_table USING (user_id)) -SELECT - max(date_of_birth) +SELECT max(date_of_birth) FROM users_that_have_orders GROUP BY GROUPING SETS (user_id, email); ... @@ -1099,7 +1101,7 @@ INSERT INTO orders_table (order_id, user_id) VALUES ``` **Debug Info**: - Debug information shows how the query is rebuilt for different user_ids. + Debug information shows how the query is rebuilt for different user_ids. Here, the shard_count is 4. ```sql -- for user_id: 1 DEBUG: query after rebuilding: INSERT INTO public.orders_table_102041 AS citus_table_alias (order_id, user_id) VALUES ('1'::bigint,'1'::bigint), ('3'::bigint,'1'::bigint) @@ -1133,7 +1135,7 @@ DEBUG: query after rebuilding: INSERT INTO public.orders_table_102064 AS citus **Examples**: The following section will delve into examples, starting with simple ones and moving to more complex scenarios. -### INSERT.. SELECT Advanced Scenarios +### INSERT.. SELECT Query Planning **Overview**: The `INSERT .. SELECT` pushdown logic builds upon the pushdown planning for `SELECT` commands. The key requirements include colocated tables and matching distribution columns. Relevant C functions are `CreateDistributedInsertSelectPlan`, `DistributedInsertSelectSupported()`, and `AllDistributionKeysInQueryAreEqual`. @@ -1267,7 +1269,7 @@ WHERE user_id IN (SELECT user_id FROM high_value_users); Used for more complex queries, like those with subqueries or joins that can't be pushed down. 
The queries are planned recursively. ```sql DELETE FROM users_table WHERE user_id -IN (SELECT user_id FROM orders_table WHERE total > 100 ORDER BY total DESC LIMIT 5); +IN (SELECT user_id FROM orders_table WHERE order_date < '2023-01-01' ORDER BY order_date LIMIT 5); ``` ### Correlated/Lateral Subqueries in Planning @@ -1279,8 +1281,7 @@ Correlated or LATERAL subqueries have special behavior in Citus. They can often **Key Code Details**: For more information on the code, check the following functions: `DeferErrorIfCannotPushdownSubquery()` -> - `ContainsReferencesToOuterQuery()` -> - `DeferErrorIfSubqueryRequiresMerge()`. + `ContainsReferencesToOuterQuery()`, `DeferErrorIfSubqueryRequiresMerge()`, `DeferredErrorIfUnsupportedLateralSubquery()`. LATERAL queries are different/unique: even if the subquery requires a merge step such as a `LIMIT`, if the correlation is on the distribution column, we can push it down. See [#4385](https://github.com/citusdata/citus/pull/4385). @@ -1409,7 +1410,7 @@ WITH recent_orders AS ( ) SELECT u.* FROM users_table u -JOIN recent_orders o ON u.user_id = o.product_id; +JOIN recent_orders o ON u.user_id = o.product_id JOIN orders_table o2 ON o2.product_id = o.product_id; ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns ``` diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c index 933ee7425..ed256296c 100644 --- a/src/backend/distributed/planner/fast_path_router_planner.c +++ b/src/backend/distributed/planner/fast_path_router_planner.c @@ -154,7 +154,7 @@ GeneratePlaceHolderPlannedStmt(Query *parse) * being a fast path router query. 
* The requirements for the fast path query can be listed below: * - * - SELECT query without CTES, sublinks-subqueries, set operations + * - SELECT/UPDATE/DELETE query without CTES, sublinks-subqueries, set operations * - The query should touch only a single hash distributed or reference table * - The distribution with equality operator should be in the WHERE clause * and it should be ANDed with any other filters. Also, the distribution From c9dae2684fdbbf3d91f740b0d5360542df74dd17 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 10:09:13 +0300 Subject: [PATCH 14/60] tests db as role --- src/backend/distributed/commands/common.c | 30 +++++++++++++++++++ src/backend/distributed/commands/database.c | 20 ++----------- src/backend/distributed/commands/role.c | 4 +-- .../distributed/commands/utility_hook.c | 20 +++++-------- src/include/distributed/commands.h | 3 ++ 5 files changed, 44 insertions(+), 33 deletions(-) diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index 797981d47..bfc03cca1 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -28,6 +28,8 @@ #include "distributed/metadata/distobject.h" #include "distributed/multi_executor.h" #include "distributed/worker_transaction.h" +#include "catalog/pg_database.h" +#include "commands/dbcommands.h" /* @@ -339,3 +341,31 @@ DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool isPostproces return objectAddresses; } + +void UnmarkRolesAndDatabaseDistributed(Node *node) +{ + if (IsA(node, DropRoleStmt)) + { + DropRoleStmt *stmt = castNode(DropRoleStmt, node); + List *allDropRoles = stmt->roles; + + List *distributedDropRoles = FilterDistributedRoles(allDropRoles); + if (list_length(distributedDropRoles) > 0) + { + UnmarkRolesDistributed(distributedDropRoles); + } + + } + else if (IsA(node, DropdbStmt)) + { + elog(LOG, "Unmarking database1 as distributed"); + DropdbStmt *stmt = 
castNode(DropdbStmt, node); + char *dbName = stmt->dbname; + + Oid dbOid = get_database_oid(dbName, stmt->missing_ok); + ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*dbAddress, DatabaseRelationId, dbOid); + UnmarkObjectDistributed(dbAddress); + elog(LOG, "Unmarking database %s as distributed", dbName); + } +} diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 88aea7abb..26626d562 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -352,19 +352,6 @@ citus_internal_database_command(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - -static char * -GetUnmarkDatabaseDistributedSql(char *dbName) -{ - StringInfoData pg_dist_object_delete = { 0 }; - initStringInfo(&pg_dist_object_delete); - appendStringInfo(&pg_dist_object_delete, "delete from pg_dist_object where " - "objid in (select oid from pg_database where datname = '%s')", - dbName); - return pg_dist_object_delete.data; -} - - List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) @@ -375,6 +362,7 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, } EnsureCoordinator(); + EnsureSequentialModeForRoleDDL(); DropdbStmt *stmt = (DropdbStmt *) node; @@ -394,9 +382,6 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, return NIL; } - UnmarkObjectDistributed(&dbAddress); - char *unmarkDatabaseDistributedSql = GetUnmarkDatabaseDistributedSql(stmt->dbname); - char *dropDatabaseCommand = DeparseTreeNode(node); StringInfo internalDropCommand = makeStringInfo(); @@ -405,8 +390,7 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, quote_literal_cstr(dropDatabaseCommand)); - List *commands = list_make4(DISABLE_DDL_PROPAGATION, - unmarkDatabaseDistributedSql, + List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) internalDropCommand->data, ENABLE_DDL_PROPAGATION); diff --git 
a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 754be1a2b..34be44637 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -65,7 +65,6 @@ static DefElem * makeDefElemBool(char *name, bool value); static List * GenerateRoleOptionsList(HeapTuple tuple); static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options); static List * GenerateGrantRoleStmtsOfRole(Oid roleid); -static void EnsureSequentialModeForRoleDDL(void); static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple, TupleDesc DbRoleSettingDescription); @@ -1080,6 +1079,7 @@ UnmarkRolesDistributed(List *roles) } ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); + elog(LOG, "Unmarking role %s as distributed", role->rolename); UnmarkObjectDistributed(&roleAddress); } } @@ -1278,7 +1278,7 @@ CreateRoleStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) * with the role the role needs to be visible on all connections used by the transaction, * meaning we can only use 1 connection per node. 
*/ -static void +void EnsureSequentialModeForRoleDDL(void) { if (!IsTransactionBlock()) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index dd729cad0..108f6b50a 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -80,6 +80,7 @@ #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/syscache.h" +#include "catalog/pg_database.h" bool EnableDDLPropagation = true; /* ddl propagation is enabled */ @@ -148,6 +149,7 @@ multi_ProcessUtility(PlannedStmt *pstmt, DestReceiver *dest, QueryCompletion *completionTag) { + elog(LOG, "multi_ProcessUtility called"); if (readOnlyTree) { pstmt = copyObject(pstmt); @@ -578,6 +580,8 @@ ProcessUtilityInternal(PlannedStmt *pstmt, PreprocessLockStatement((LockStmt *) parsetree, context); } + + /* * We only process ALTER TABLE ... ATTACH PARTITION commands in the function below * and distribute the partition if necessary. @@ -724,22 +728,12 @@ ProcessUtilityInternal(PlannedStmt *pstmt, } /* - * Make sure that dropping the role deletes the pg_dist_object entries. There is a - * separate logic for roles, since roles are not included as dropped objects in the + * Make sure that dropping the role and database deletes the pg_dist_object entries. There is a + * separate logic for roles and database, since roles database are not included as dropped objects in the * drop event trigger. To handle it both on worker and coordinator nodes, it is not * implemented as a part of process functions but here. 
*/ - if (IsA(parsetree, DropRoleStmt)) - { - DropRoleStmt *stmt = castNode(DropRoleStmt, parsetree); - List *allDropRoles = stmt->roles; - - List *distributedDropRoles = FilterDistributedRoles(allDropRoles); - if (list_length(distributedDropRoles) > 0) - { - UnmarkRolesDistributed(distributedDropRoles); - } - } + UnmarkRolesAndDatabaseDistributed(parsetree); pstmt->utilityStmt = parsetree; diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index b1f65177e..cde0bd4f0 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -193,6 +193,7 @@ extern List * DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, boo isPostprocess); extern List * DropTextSearchDictObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +extern void UnmarkRolesAndDatabaseDistributed(Node *node); /* index.c */ typedef void (*PGIndexProcessor)(Form_pg_index, List **, int); @@ -241,6 +242,7 @@ extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString) extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); + /* domain.c - forward declarations */ extern List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); @@ -510,6 +512,7 @@ extern List * RenameRoleStmtObjectAddress(Node *stmt, bool missing_ok, bool extern void UnmarkRolesDistributed(List *roles); extern List * FilterDistributedRoles(List *roles); +extern void EnsureSequentialModeForRoleDDL(void); /* schema.c - forward declarations */ extern List * PostprocessCreateSchemaStmt(Node *node, const char *queryString); From 2d009d46d3042dd1500c31877901f190f1d47628 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 10:32:27 +0300 Subject: [PATCH 15/60] Fixes worker pg_dist_object removal issue --- src/backend/distributed/commands/database.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git 
a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 88aea7abb..94de7f508 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -365,6 +365,20 @@ GetUnmarkDatabaseDistributedSql(char *dbName) } +static void +UnmarkObjectDistributedForDropDb(const ObjectAddress *distAddress, char *dbName) +{ + UnmarkObjectDistributed(distAddress); + + if (EnableMetadataSync) + { + char *workerPgDistObjectUpdateCommand = + GetUnmarkDatabaseDistributedSql(dbName); + SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand); + } +} + + List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) @@ -394,7 +408,7 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, return NIL; } - UnmarkObjectDistributed(&dbAddress); + UnmarkObjectDistributedForDropDb(&dbAddress, stmt->dbname); char *unmarkDatabaseDistributedSql = GetUnmarkDatabaseDistributedSql(stmt->dbname); char *dropDatabaseCommand = DeparseTreeNode(node); From 73f0db2aeddd6c39dfd6aed88afa7261349cfad3 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 14:09:36 +0300 Subject: [PATCH 16/60] Fixes create and drop database transaction use --- src/backend/distributed/commands/common.c | 27 ---------- src/backend/distributed/commands/database.c | 51 +++++++++---------- .../commands/distribute_object_ops.c | 2 +- .../distributed/commands/utility_hook.c | 22 +++++++- src/backend/distributed/metadata/distobject.c | 27 ++++++++++ src/include/distributed/commands.h | 3 +- .../distributed/commands/utility_hook.h | 1 + src/include/distributed/metadata/distobject.h | 1 + 8 files changed, 77 insertions(+), 57 deletions(-) diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index bfc03cca1..b338792d8 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -342,30 
+342,3 @@ DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool isPostproces return objectAddresses; } -void UnmarkRolesAndDatabaseDistributed(Node *node) -{ - if (IsA(node, DropRoleStmt)) - { - DropRoleStmt *stmt = castNode(DropRoleStmt, node); - List *allDropRoles = stmt->roles; - - List *distributedDropRoles = FilterDistributedRoles(allDropRoles); - if (list_length(distributedDropRoles) > 0) - { - UnmarkRolesDistributed(distributedDropRoles); - } - - } - else if (IsA(node, DropdbStmt)) - { - elog(LOG, "Unmarking database1 as distributed"); - DropdbStmt *stmt = castNode(DropdbStmt, node); - char *dbName = stmt->dbname; - - Oid dbOid = get_database_oid(dbName, stmt->missing_ok); - ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); - ObjectAddressSet(*dbAddress, DatabaseRelationId, dbOid); - UnmarkObjectDistributed(dbAddress); - elog(LOG, "Unmarking database %s as distributed", dbName); - } -} diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 26626d562..34813085e 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -273,16 +273,11 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString) char *createDatabaseCommand = DeparseTreeNode(node); - StringInfo internalCreateCommand = makeStringInfo(); - appendStringInfo(internalCreateCommand, - "SELECT pg_catalog.citus_internal_database_command(%s)", - quote_literal_cstr(createDatabaseCommand)); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) internalCreateCommand->data, + (void *) createDatabaseCommand, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NontransactionalNodeDDLTask(NON_COORDINATOR_NODES, commands); } @@ -356,55 +351,59 @@ List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { + bool isPostProcess = false; if 
(!EnableCreateDatabasePropagation || !ShouldPropagate()) { return NIL; } EnsureCoordinator(); - EnsureSequentialModeForRoleDDL(); DropdbStmt *stmt = (DropdbStmt *) node; - Oid databaseOid = get_database_oid(stmt->dbname, stmt->missing_ok); + List *addresses = GetObjectAddressListFromParseTree(node, stmt->missing_ok, isPostProcess); - if (databaseOid == InvalidOid) + if (list_length(addresses) == 0) { - /* let regular ProcessUtility deal with IF NOT EXISTS */ return NIL; } - - ObjectAddress dbAddress = { 0 }; - ObjectAddressSet(dbAddress, DatabaseRelationId, databaseOid); - if (!IsObjectDistributed(&dbAddress)) + ObjectAddress *address = (ObjectAddress *) linitial(addresses); + if (address->objectId == InvalidOid ||!IsObjectDistributed(address)) { return NIL; } char *dropDatabaseCommand = DeparseTreeNode(node); - StringInfo internalDropCommand = makeStringInfo(); - appendStringInfo(internalDropCommand, - "SELECT pg_catalog.citus_internal_database_command(%s)", - quote_literal_cstr(dropDatabaseCommand)); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) internalDropCommand->data, + (void *) dropDatabaseCommand, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NontransactionalNodeDDLTask(NON_COORDINATOR_NODES, commands); } +static ObjectAddress *GetDatabaseAddressFromDatabaseName(char *databaseName) +{ + Oid databaseOid = get_database_oid(databaseName, false); + ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*dbAddress, DatabaseRelationId, databaseOid); + return dbAddress; +} + +List * +DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) +{ + DropdbStmt *stmt = castNode(DropdbStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname); + return list_make1(dbAddress); +} List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { CreatedbStmt *stmt = castNode(CreatedbStmt, node); - Oid 
databaseOid = get_database_oid(stmt->dbname, missing_ok); - ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); - ObjectAddressSet(*dbAddress, DatabaseRelationId, databaseOid); - + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname); return list_make1(dbAddress); } diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 49a96e016..2888bef1d 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -484,7 +484,7 @@ static DistributeObjectOps Database_Drop = { .postprocess = NULL, .objectType = OBJECT_DATABASE, .operationType = DIST_OPS_DROP, - .address = NULL, + .address = DropDatabaseStmtObjectAddress, .markDistributed = false, }; diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 108f6b50a..3cd2ccd1f 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -729,7 +729,7 @@ ProcessUtilityInternal(PlannedStmt *pstmt, /* * Make sure that dropping the role and database deletes the pg_dist_object entries. There is a - * separate logic for roles and database, since roles database are not included as dropped objects in the + * separate logic for roles and database, since roles and database are not included as dropped objects in the * drop event trigger. To handle it both on worker and coordinator nodes, it is not * implemented as a part of process functions but here. */ @@ -1482,13 +1482,31 @@ DDLTaskList(Oid relationId, const char *commandString) return taskList; } +/* + * NontransactionalNodeDDLTask builds a list of tasks to execute a DDL command on a + * given target set of nodes with cannotBeExecutedInTransction is set to make sure + * that list is being executed without a transaction. 
+ */ +List * NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands ){ + List *ddlJobs = NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + DDLJob *ddlJob = NULL; + foreach_ptr(ddlJob, ddlJobs) + { + Task *task = NULL; + foreach_ptr(task, ddlJob->taskList) + { + task->cannotBeExecutedInTransction = true; + } + } + return ddlJobs; +} /* * NodeDDLTaskList builds a list of tasks to execute a DDL command on a * given target set of nodes. */ List * -NodeDDLTaskList(TargetWorkerSet targets, List *commands) +NodeDDLTaskList(TargetWorkerSet targets, List *commands ) { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetObjectAddress = InvalidObjectAddress; diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index c420e6ec3..722f51bc9 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -48,6 +48,8 @@ #include "utils/lsyscache.h" #include "utils/regproc.h" #include "utils/rel.h" +#include "catalog/pg_database.h" +#include "commands/dbcommands.h" static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress); @@ -355,6 +357,31 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, return spiStatus; } +void UnmarkRolesAndDatabaseDistributed(Node *node) +{ + if (IsA(node, DropRoleStmt)) + { + DropRoleStmt *stmt = castNode(DropRoleStmt, node); + List *allDropRoles = stmt->roles; + + List *distributedDropRoles = FilterDistributedRoles(allDropRoles); + if (list_length(distributedDropRoles) > 0) + { + UnmarkRolesDistributed(distributedDropRoles); + } + + } + else if (IsA(node, DropdbStmt)) + { + DropdbStmt *stmt = castNode(DropdbStmt, node); + char *dbName = stmt->dbname; + + Oid dbOid = get_database_oid(dbName, stmt->missing_ok); + ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*dbAddress, DatabaseRelationId, dbOid); + UnmarkObjectDistributed(dbAddress); + } +} /* * 
UnmarkObjectDistributed removes the entry from pg_dist_object that marks this object as diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index cde0bd4f0..95d6e9a13 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -193,7 +193,6 @@ extern List * DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, boo isPostprocess); extern List * DropTextSearchDictObjectAddress(Node *node, bool missing_ok, bool isPostprocess); -extern void UnmarkRolesAndDatabaseDistributed(Node *node); /* index.c */ typedef void (*PGIndexProcessor)(Form_pg_index, List **, int); @@ -241,6 +240,8 @@ extern List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString); extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); /* domain.c - forward declarations */ diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 9ae57b49a..3295d110c 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -94,6 +94,7 @@ extern void ProcessUtilityParseTree(Node *node, const char *queryString, extern void MarkInvalidateForeignKeyGraph(void); extern void InvalidateForeignKeyGraphForDDL(void); extern List * DDLTaskList(Oid relationId, const char *commandString); +extern List * NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands ); extern List * NodeDDLTaskList(TargetWorkerSet targets, List *commands); extern bool AlterTableInProgress(void); extern bool DropSchemaOrDBInProgress(void); diff --git a/src/include/distributed/metadata/distobject.h 
b/src/include/distributed/metadata/distobject.h index ba984091c..86fada5f7 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -27,6 +27,7 @@ extern void MarkObjectDistributed(const ObjectAddress *distAddress); extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress); extern void MarkObjectDistributedLocally(const ObjectAddress *distAddress); extern void UnmarkObjectDistributed(const ObjectAddress *address); +extern void UnmarkRolesAndDatabaseDistributed(Node *node); extern bool IsTableOwnedByExtension(Oid relationId); extern bool ObjectAddressDependsOnExtension(const ObjectAddress *target); extern bool IsAnyObjectAddressOwnedByExtension(const List *targets, From e9e64a69c1dce29a5bd976cac0fe8238edbc6e07 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 14:18:10 +0300 Subject: [PATCH 17/60] Fixes indentation --- src/backend/distributed/commands/common.c | 1 - src/backend/distributed/commands/database.c | 12 ++++++--- .../distributed/commands/utility_hook.c | 27 ++++++++++--------- src/backend/distributed/metadata/distobject.c | 8 +++--- src/include/distributed/commands.h | 6 +++-- .../distributed/commands/utility_hook.h | 2 +- 6 files changed, 34 insertions(+), 22 deletions(-) diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index b338792d8..9a87df9f1 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -341,4 +341,3 @@ DropTextSearchConfigObjectAddress(Node *node, bool missing_ok, bool isPostproces return objectAddresses; } - diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 34813085e..6522571fa 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -347,6 +347,7 @@ citus_internal_database_command(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } + List * 
PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) @@ -361,7 +362,8 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, DropdbStmt *stmt = (DropdbStmt *) node; - List *addresses = GetObjectAddressListFromParseTree(node, stmt->missing_ok, isPostProcess); + List *addresses = GetObjectAddressListFromParseTree(node, stmt->missing_ok, + isPostProcess); if (list_length(addresses) == 0) { @@ -369,7 +371,7 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, } ObjectAddress *address = (ObjectAddress *) linitial(addresses); - if (address->objectId == InvalidOid ||!IsObjectDistributed(address)) + if (address->objectId == InvalidOid || !IsObjectDistributed(address)) { return NIL; } @@ -384,7 +386,9 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, return NontransactionalNodeDDLTask(NON_COORDINATOR_NODES, commands); } -static ObjectAddress *GetDatabaseAddressFromDatabaseName(char *databaseName) + +static ObjectAddress * +GetDatabaseAddressFromDatabaseName(char *databaseName) { Oid databaseOid = get_database_oid(databaseName, false); ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); @@ -392,6 +396,7 @@ static ObjectAddress *GetDatabaseAddressFromDatabaseName(char *databaseName) return dbAddress; } + List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { @@ -400,6 +405,7 @@ DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) return list_make1(dbAddress); } + List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 3cd2ccd1f..b0fae6727 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -581,7 +581,6 @@ ProcessUtilityInternal(PlannedStmt *pstmt, } - /* * We only process ALTER TABLE ... 
ATTACH PARTITION commands in the function below * and distribute the partition if necessary. @@ -1482,31 +1481,35 @@ DDLTaskList(Oid relationId, const char *commandString) return taskList; } + /* * NontransactionalNodeDDLTask builds a list of tasks to execute a DDL command on a * given target set of nodes with cannotBeExecutedInTransction is set to make sure * that list is being executed without a transaction. */ -List * NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands ){ +List * +NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands) +{ List *ddlJobs = NodeDDLTaskList(NON_COORDINATOR_NODES, commands); - DDLJob *ddlJob = NULL; - foreach_ptr(ddlJob, ddlJobs) - { - Task *task = NULL; - foreach_ptr(task, ddlJob->taskList) - { - task->cannotBeExecutedInTransction = true; - } - } + DDLJob *ddlJob = NULL; + foreach_ptr(ddlJob, ddlJobs) + { + Task *task = NULL; + foreach_ptr(task, ddlJob->taskList) + { + task->cannotBeExecutedInTransction = true; + } + } return ddlJobs; } + /* * NodeDDLTaskList builds a list of tasks to execute a DDL command on a * given target set of nodes. 
*/ List * -NodeDDLTaskList(TargetWorkerSet targets, List *commands ) +NodeDDLTaskList(TargetWorkerSet targets, List *commands) { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetObjectAddress = InvalidObjectAddress; diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index 722f51bc9..af8354ee3 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -357,7 +357,9 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, return spiStatus; } -void UnmarkRolesAndDatabaseDistributed(Node *node) + +void +UnmarkRolesAndDatabaseDistributed(Node *node) { if (IsA(node, DropRoleStmt)) { @@ -369,12 +371,11 @@ void UnmarkRolesAndDatabaseDistributed(Node *node) { UnmarkRolesDistributed(distributedDropRoles); } - } else if (IsA(node, DropdbStmt)) { DropdbStmt *stmt = castNode(DropdbStmt, node); - char *dbName = stmt->dbname; + char *dbName = stmt->dbname; Oid dbOid = get_database_oid(dbName, stmt->missing_ok); ObjectAddress *dbAddress = palloc0(sizeof(ObjectAddress)); @@ -383,6 +384,7 @@ void UnmarkRolesAndDatabaseDistributed(Node *node) } } + /* * UnmarkObjectDistributed removes the entry from pg_dist_object that marks this object as * distributed. This will prevent updates to that object to be propagated to the worker. 
diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 95d6e9a13..06dda53eb 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -240,8 +240,10 @@ extern List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString); extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); -extern List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); -extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +extern List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool + isPostprocess); +extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool + isPostprocess); /* domain.c - forward declarations */ diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index 3295d110c..93d9e8355 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -94,7 +94,7 @@ extern void ProcessUtilityParseTree(Node *node, const char *queryString, extern void MarkInvalidateForeignKeyGraph(void); extern void InvalidateForeignKeyGraphForDDL(void); extern List * DDLTaskList(Oid relationId, const char *commandString); -extern List * NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands ); +extern List * NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands); extern List * NodeDDLTaskList(TargetWorkerSet targets, List *commands); extern bool AlterTableInProgress(void); extern bool DropSchemaOrDBInProgress(void); From fb9f75bb241cf196c612241f4a26cada03fd37e4 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 14:43:12 +0300 Subject: [PATCH 18/60] Removes unnecessary logs --- src/backend/distributed/commands/utility_hook.c 
| 1 - 1 file changed, 1 deletion(-) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index b0fae6727..27b2b9ddc 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -149,7 +149,6 @@ multi_ProcessUtility(PlannedStmt *pstmt, DestReceiver *dest, QueryCompletion *completionTag) { - elog(LOG, "multi_ProcessUtility called"); if (readOnlyTree) { pstmt = copyObject(pstmt); From 1dcee370e11421f6232c314799804329b8f8d4df Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 14:44:34 +0300 Subject: [PATCH 19/60] Fixes create_drop_database_test --- .../regress/expected/create_drop_database_propagation.out | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 32552d01a..088a445d6 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -107,15 +107,15 @@ set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; set citus.grep_remote_commands = '%CREATE DATABASE%'; create database "mydatabase#1'2"; -NOTICE: issuing SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE "mydatabase#1''2"') +NOTICE: issuing CREATE DATABASE "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE "mydatabase#1''2"') +NOTICE: issuing CREATE DATABASE "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx set citus.grep_remote_commands = '%DROP DATABASE%'; drop database if exists "mydatabase#1'2"; -NOTICE: issuing SELECT pg_catalog.citus_internal_database_command('DROP DATABASE IF EXISTS "mydatabase#1''2"') +NOTICE: issuing DROP DATABASE IF EXISTS 
"mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT pg_catalog.citus_internal_database_command('DROP DATABASE IF EXISTS "mydatabase#1''2"') +NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx --clean up resources created by this test drop tablespace create_drop_db_tablespace; From f779947b2b3522cf951497fbcea787cb9f08f15d Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 14:57:36 +0300 Subject: [PATCH 20/60] Removes unnecessary logs --- src/backend/distributed/commands/role.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 34be44637..792efd934 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -1079,7 +1079,6 @@ UnmarkRolesDistributed(List *roles) } ObjectAddressSet(roleAddress, AuthIdRelationId, roleOid); - elog(LOG, "Unmarking role %s as distributed", role->rolename); UnmarkObjectDistributed(&roleAddress); } } From b5cbc048b70eef8fd4072943d6b4bce6218392b0 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 15:12:08 +0300 Subject: [PATCH 21/60] Fixes missing ok issue --- src/backend/distributed/commands/database.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 6522571fa..1ba0a1639 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -388,9 +388,9 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, static ObjectAddress * -GetDatabaseAddressFromDatabaseName(char *databaseName) +GetDatabaseAddressFromDatabaseName(char *databaseName, bool missingOk) { - Oid databaseOid = get_database_oid(databaseName, false); + Oid databaseOid = get_database_oid(databaseName, missingOk); ObjectAddress 
*dbAddress = palloc0(sizeof(ObjectAddress)); ObjectAddressSet(*dbAddress, DatabaseRelationId, databaseOid); return dbAddress; @@ -401,7 +401,7 @@ List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { DropdbStmt *stmt = castNode(DropdbStmt, node); - ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, stmt->missing_ok); return list_make1(dbAddress); } @@ -410,6 +410,6 @@ List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { CreatedbStmt *stmt = castNode(CreatedbStmt, node); - ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname,stmt->missing_ok); return list_make1(dbAddress); } From 3a0a5ae3449e0157138e98ddf4d13423786dcc12 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 15:13:22 +0300 Subject: [PATCH 22/60] Fixes missin ok issue --- src/backend/distributed/commands/database.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 1ba0a1639..d6c02d492 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -410,6 +410,6 @@ List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { CreatedbStmt *stmt = castNode(CreatedbStmt, node); - ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname,stmt->missing_ok); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname,missing_ok); return list_make1(dbAddress); } From 2f52fbd13ea2b4600b1fd49900e8661303b5192d Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 15:14:05 +0300 Subject: [PATCH 23/60] Fixes missing ok issue --- src/backend/distributed/commands/database.c | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index d6c02d492..8d2b14f61 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -401,7 +401,7 @@ List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { DropdbStmt *stmt = castNode(DropdbStmt, node); - ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, stmt->missing_ok); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, missing_ok); return list_make1(dbAddress); } From c7b040c3cdb16998f03b0036f7d0fbd71e00f9a2 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 15:19:50 +0300 Subject: [PATCH 24/60] Fixes indentation --- src/backend/distributed/commands/database.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 8d2b14f61..7f0f03aa6 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -401,7 +401,8 @@ List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { DropdbStmt *stmt = castNode(DropdbStmt, node); - ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, missing_ok); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, + missing_ok); return list_make1(dbAddress); } @@ -410,6 +411,7 @@ List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) { CreatedbStmt *stmt = castNode(CreatedbStmt, node); - ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname,missing_ok); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, + missing_ok); return list_make1(dbAddress); } From c7da2cd122c4870e6e371bdcabf05086604e0230 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 15:44:59 
+0300 Subject: [PATCH 25/60] Fixes cannotBeExecutedInTransction --- src/backend/distributed/commands/utility_hook.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index d52352907..28159b616 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -1483,7 +1483,7 @@ DDLTaskList(Oid relationId, const char *commandString) /* * NontransactionalNodeDDLTask builds a list of tasks to execute a DDL command on a - * given target set of nodes with cannotBeExecutedInTransction is set to make sure + * given target set of nodes with cannotBeExecutedInTransaction is set to make sure * that list is being executed without a transaction. */ List * @@ -1496,7 +1496,7 @@ NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands) Task *task = NULL; foreach_ptr(task, ddlJob->taskList) { - task->cannotBeExecutedInTransction = true; + task->cannotBeExecutedInTransaction = true; } } return ddlJobs; From 48d749e2e8daceeb3f13d2165d9b6684fc548898 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 19:15:46 +0300 Subject: [PATCH 26/60] Fixes message for non-transaction-safe commands --- src/backend/distributed/commands/utility_hook.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 28159b616..85ae27356 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -1265,9 +1265,10 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) { ereport(WARNING, (errmsg( - "CONCURRENTLY-enabled index commands can fail partially, " - "leaving behind an INVALID index.\n Use DROP INDEX " - "CONCURRENTLY IF EXISTS to remove the invalid index."))); + "Commands that are not transaction-safe may result in partial failure" + ", potentially 
leading to an inconsistent state. If the problematic command" + " is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the " + "object, if applicable, and then reattempt the original command."))); PG_RE_THROW(); } } From c437850db5d60868f7814e0cb1bed1579464e4f8 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 19:25:26 +0300 Subject: [PATCH 27/60] Beatufies error message --- src/backend/distributed/commands/utility_hook.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 85ae27356..fc763f369 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -1266,9 +1266,9 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) ereport(WARNING, (errmsg( "Commands that are not transaction-safe may result in partial failure" - ", potentially leading to an inconsistent state. If the problematic command" + ", potentially leading to an inconsistent state.\nIf the problematic command" " is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the " - "object, if applicable, and then reattempt the original command."))); + "object,\nif applicable, and then reattempt the original command."))); PG_RE_THROW(); } } From c8fcf080c261e99484dd164fa2b0c47be384a200 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 24 Oct 2023 20:44:43 +0300 Subject: [PATCH 28/60] Fixes out files for transaction message --- .../failure_create_index_concurrently.out | 30 +++++++++++-------- src/test/regress/expected/single_node.out | 5 ++-- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index a198ddc70..94d0f373d 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ 
b/src/test/regress/expected/failure_create_index_concurrently.out @@ -26,8 +26,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -59,8 +60,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -86,8 +88,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid( (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. 
+WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -111,8 +114,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid( (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -137,8 +141,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()'); (1 row) DROP INDEX CONCURRENTLY IF EXISTS idx_index_test; -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. 
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -164,8 +169,9 @@ SELECT create_distributed_table('index_test_2', 'a'); INSERT INTO index_test_2 VALUES (1, 1), (1, 2); CREATE UNIQUE INDEX CONCURRENTLY index_test_2_a_idx ON index_test_2(a); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. ERROR: could not create unique index "index_test_2_a_idx_1880019" DETAIL: Key (a)=(1) is duplicated. CONTEXT: while executing command on localhost:xxxxx diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index f485763c5..3b24fd5f5 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -88,8 +88,9 @@ SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); (1 row) CREATE INDEX CONCURRENTLY ON failover_to_local(a); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. - Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. 
ERROR: the total number of connections on the server is more than max_connections(100) HINT: Consider using a higher value for max_connections -- reset global GUC changes From 2bf1472c8e56d13d88169ed6c1c38cede98c8c23 Mon Sep 17 00:00:00 2001 From: Gokhan Gulbiz Date: Thu, 26 Oct 2023 14:54:58 +0300 Subject: [PATCH 29/60] Move GHA environment variables to workflow file (#7275) Since GHA does not interpolate env variables in a matrix context, This PR defines them in a separate job and uses them in other jobs. --- .github/workflows/build_and_test.yml | 131 +++++++++++------- .../workflows/packaging-test-pipelines.yml | 8 +- 2 files changed, 85 insertions(+), 54 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 5944c38db..1f22ff034 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -13,10 +13,33 @@ on: pull_request: types: [opened, reopened,synchronize] jobs: + # Since GHA does not interpolate env varibles in matrix context, we need to + # define them in a separate job and use them in other jobs. + params: + runs-on: ubuntu-latest + name: Initialize parameters + outputs: + build_image_name: "citus/extbuilder" + test_image_name: "citus/exttester" + citusupgrade_image_name: "citus/citusupgradetester" + fail_test_image_name: "citus/failtester" + pgupgrade_image_name: "citus/pgupgradetester" + style_checker_image_name: "citus/stylechecker" + style_checker_tools_version: "0.8.18" + image_suffix: "-v9d71045" + pg14_version: "14.9" + pg15_version: "15.4" + pg16_version: "16.0" + upgrade_pg_versions: "14.9-15.4-16.0" + steps: + # Since GHA jobs needs at least one step we use a noop step here. 
+ - name: Set up parameters + run: echo 'noop' check-sql-snapshots: + needs: params runs-on: ubuntu-20.04 container: - image: ${{ vars.build_image_name }}:latest + image: ${{ needs.params.outputs.build_image_name }}:latest options: --user root steps: - uses: actions/checkout@v3.5.0 @@ -25,9 +48,10 @@ jobs: git config --global --add safe.directory ${GITHUB_WORKSPACE} ci/check_sql_snapshots.sh check-style: + needs: params runs-on: ubuntu-20.04 container: - image: ${{ vars.style_checker_image_name }}:${{ vars.style_checker_tools_version }}${{ vars.image_suffix }} + image: ${{ needs.params.outputs.style_checker_image_name }}:${{ needs.params.outputs.style_checker_tools_version }}${{ needs.params.outputs.image_suffix }} steps: - name: Check Snapshots run: | @@ -68,18 +92,19 @@ jobs: - name: Check for missing downgrade scripts run: ci/check_migration_files.sh build: + needs: params name: Build for PG ${{ matrix.pg_version}} strategy: fail-fast: false matrix: image_name: - - ${{ vars.build_image_name }} + - ${{ needs.params.outputs.build_image_name }} image_suffix: - - ${{ vars.image_suffix}} + - ${{ needs.params.outputs.image_suffix}} pg_version: - - ${{ vars.pg14_version }} - - ${{ vars.pg15_version }} - - ${{ vars.pg16_version }} + - ${{ needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} + - ${{ needs.params.outputs.pg16_version }} runs-on: ubuntu-20.04 container: image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ matrix.image_suffix }}" @@ -106,11 +131,11 @@ jobs: suite: - regress image_name: - - ${{ vars.test_image_name }} + - ${{ needs.params.outputs.test_image_name }} pg_version: - - ${{ vars.pg14_version }} - - ${{ vars.pg15_version }} - - ${{ vars.pg16_version }} + - ${{ needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} + - ${{ needs.params.outputs.pg16_version }} make: - check-split - check-multi @@ -129,69 +154,70 @@ jobs: - check-enterprise-isolation-logicalrep-3 include: - make: 
check-failure - pg_version: ${{ vars.pg14_version }} + pg_version: ${{ needs.params.outputs.pg14_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-failure - pg_version: ${{ vars.pg15_version }} + pg_version: ${{ needs.params.outputs.pg15_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-failure - pg_version: ${{ vars.pg16_version }} + pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-enterprise-failure - pg_version: ${{ vars.pg14_version }} + pg_version: ${{ needs.params.outputs.pg14_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-enterprise-failure - pg_version: ${{ vars.pg15_version }} + pg_version: ${{ needs.params.outputs.pg15_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-enterprise-failure - pg_version: ${{ vars.pg16_version }} + pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-pytest - pg_version: ${{ vars.pg14_version }} + pg_version: ${{ needs.params.outputs.pg14_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-pytest - pg_version: ${{ vars.pg15_version }} + pg_version: ${{ needs.params.outputs.pg15_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-pytest - pg_version: ${{ 
vars.pg16_version }} + pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: installcheck suite: cdc - image_name: ${{ vars.test_image_name }} - pg_version: ${{ vars.pg15_version }} + image_name: ${{ needs.params.outputs.test_image_name }} + pg_version: ${{ needs.params.outputs.pg15_version }} - make: installcheck suite: cdc - image_name: ${{ vars.test_image_name }} - pg_version: ${{ vars.pg16_version }} + image_name: ${{ needs.params.outputs.test_image_name }} + pg_version: ${{ needs.params.outputs.pg16_version }} - make: check-query-generator - pg_version: ${{ vars.pg14_version }} + pg_version: ${{ needs.params.outputs.pg14_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-query-generator - pg_version: ${{ vars.pg15_version }} + pg_version: ${{ needs.params.outputs.pg15_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} - make: check-query-generator - pg_version: ${{ vars.pg16_version }} + pg_version: ${{ needs.params.outputs.pg16_version }} suite: regress - image_name: ${{ vars.fail_test_image_name }} + image_name: ${{ needs.params.outputs.fail_test_image_name }} runs-on: ubuntu-20.04 container: - image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ vars.image_suffix }}" + image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}" options: --user root --dns=8.8.8.8 # Due to Github creates a default network for each job, we need to use # --dns= to have similar DNS settings as our other CI systems or local # machines. Otherwise, we may see different results. 
needs: + - params - build steps: - uses: actions/checkout@v3.5.0 @@ -212,19 +238,20 @@ jobs: name: PG${{ matrix.pg_version }} - check-arbitrary-configs-${{ matrix.parallel }} runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"] container: - image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ vars.image_suffix }}" + image: "${{ matrix.image_name }}:${{ matrix.pg_version }}${{ needs.params.outputs.image_suffix }}" options: --user root needs: + - params - build strategy: fail-fast: false matrix: image_name: - - ${{ vars.fail_test_image_name }} + - ${{ needs.params.outputs.fail_test_image_name }} pg_version: - - ${{ vars.pg14_version }} - - ${{ vars.pg15_version }} - - ${{ vars.pg16_version }} + - ${{ needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} + - ${{ needs.params.outputs.pg16_version }} parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs steps: - uses: actions/checkout@v3.5.0 @@ -258,9 +285,10 @@ jobs: name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade runs-on: ubuntu-20.04 container: - image: "${{ vars.pgupgrade_image_name }}:${{ vars.upgrade_pg_versions }}${{ vars.image_suffix }}" + image: "${{ needs.params.outputs.pgupgrade_image_name }}:${{ needs.params.outputs.upgrade_pg_versions }}${{ needs.params.outputs.image_suffix }}" options: --user root needs: + - params - build strategy: fail-fast: false @@ -305,12 +333,13 @@ jobs: flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade codecov_token: ${{ secrets.CODECOV_TOKEN }} test-citus-upgrade: - name: PG${{ vars.pg14_version }} - check-citus-upgrade + name: PG${{ needs.params.outputs.pg14_version }} - check-citus-upgrade runs-on: ubuntu-20.04 container: - image: "${{ vars.citusupgrade_image_name }}:${{ vars.pg14_version }}${{ vars.image_suffix }}" + image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ needs.params.outputs.pg14_version }}${{ needs.params.outputs.image_suffix }}" options: --user root 
needs: + - params - build steps: - uses: actions/checkout@v3.5.0 @@ -354,8 +383,9 @@ jobs: CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }} runs-on: ubuntu-20.04 container: - image: ${{ vars.test_image_name }}:${{ vars.pg16_version }}${{ vars.image_suffix }} + image: ${{ needs.params.outputs.test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }} needs: + - params - test-citus - test-arbitrary-configs - test-citus-upgrade @@ -448,11 +478,12 @@ jobs: name: Test flakyness runs-on: ubuntu-20.04 container: - image: ${{ vars.fail_test_image_name }}:${{ vars.pg16_version }}${{ vars.image_suffix }} + image: ${{ needs.params.outputs.fail_test_image_name }}:${{ needs.params.outputs.pg16_version }}${{ needs.params.outputs.image_suffix }} options: --user root env: runs: 8 needs: + - params - build - test-flakyness-pre - prepare_parallelization_matrix_32 diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml index 9d3fb81be..0fb4b7092 100644 --- a/.github/workflows/packaging-test-pipelines.yml +++ b/.github/workflows/packaging-test-pipelines.yml @@ -24,14 +24,14 @@ jobs: - name: Get Postgres Versions id: get-postgres-versions run: | - # Postgres versions are stored in .circleci/config.yml file in "build-[pg-version] format. Below command - # extracts the versions and get the unique values. - pg_versions=`grep -Eo 'build-[[:digit:]]{2}' .circleci/config.yml|sed -e "s/^build-//"|sort|uniq|tr '\n' ','| head -c -1` + # Postgres versions are stored in .github/workflows/build_and_test.yml file in "pg[pg-version]_version" + # format. Below command extracts the versions and get the unique values. 
+ pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE 'pg[0-9]+_version: "[0-9.]+"' | sed -E 's/pg([0-9]+)_version: "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',') pg_versions_array="[ ${pg_versions} ]" echo "Supported PG Versions: ${pg_versions_array}" # Below line is needed to set the output variable to be used in the next job echo "pg_versions=${pg_versions_array}" >> $GITHUB_OUTPUT - + shell: bash rpm_build_tests: name: rpm_build_tests needs: get_postgres_versions_from_file From 641e4136762de8ca092646aadf36096890f68587 Mon Sep 17 00:00:00 2001 From: gindibay Date: Thu, 26 Oct 2023 20:36:43 +0300 Subject: [PATCH 30/60] Adds metadatasync for create database --- src/backend/distributed/commands/database.c | 251 ++++++++++++++++++ .../distributed/metadata/metadata_sync.c | 7 + src/include/distributed/commands.h | 1 + 3 files changed, 259 insertions(+) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 7f0f03aa6..443c0f366 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -35,6 +35,15 @@ #include "distributed/deparse_shard_query.h" #include "distributed/listutils.h" #include "distributed/adaptive_executor.h" +#include "access/htup_details.h" +#include "catalog/pg_tablespace.h" +#include "access/heapam.h" +#include "utils/relcache.h" +#include "utils/rel.h" +#include "utils/lsyscache.h" +#include "catalog/pg_collation.h" +#include "utils/relcache.h" +#include "catalog/pg_database_d.h" static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid); @@ -415,3 +424,245 @@ CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) missing_ok); return list_make1(dbAddress); } + + +static char * +GetTablespaceName(Oid tablespaceOid) +{ + HeapTuple tuple = SearchSysCache1(TABLESPACEOID, ObjectIdGetDatum(tablespaceOid)); + if (!HeapTupleIsValid(tuple)) + { + return NULL; + } + + Form_pg_tablespace tablespaceForm = 
(Form_pg_tablespace) GETSTRUCT(tuple); + char *tablespaceName = NameStr(tablespaceForm->spcname); + + ReleaseSysCache(tuple); + + return tablespaceName; +} + + +/* + * DatabaseCollationInfo is used to store collation related information of a database + */ +typedef struct DatabaseCollationInfo +{ + char *collation; + char *ctype; + char *icu_locale; + char *collversion; +} DatabaseCollationInfo; + +/* + * GetDatabaseCollation gets oid of a database and returns all the collation related information + * We need this method since collation related info in Form_pg_database is not accessible + */ +static DatabaseCollationInfo +GetDatabaseCollation(Oid db_oid) +{ + HeapTuple tup; + DatabaseCollationInfo info; + Datum collationDatum, ctypeDatum, icuLocaleDatum, collverDatum; + bool isNull; + Relation rel; + TupleDesc tupdesc; + Snapshot snapshot; + + snapshot = RegisterSnapshot(GetLatestSnapshot()); + rel = table_open(DatabaseRelationId, AccessShareLock); + tup = get_catalog_object_by_oid(rel, Anum_pg_database_oid, db_oid); + if (!HeapTupleIsValid(tup)) + { + elog(ERROR, "cache lookup failed for database %u", db_oid); + } + + tupdesc = RelationGetDescr(rel); + collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc, &isNull); + if (isNull) + { + info.collation = NULL; + } + else + { + info.collation = TextDatumGetCString(collationDatum); + } + + ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull); + if (isNull) + { + info.ctype = NULL; + } + else + { + info.ctype = TextDatumGetCString(ctypeDatum); + } + + icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc, &isNull); + if (isNull) + { + info.icu_locale = NULL; + } + else + { + info.icu_locale = TextDatumGetCString(icuLocaleDatum); + } + + collverDatum = heap_getattr(tup, Anum_pg_database_datcollversion, tupdesc, &isNull); + if (isNull) + { + info.collversion = NULL; + } + else + { + info.collversion = TextDatumGetCString(collverDatum); + } + + table_close(rel, 
AccessShareLock); + UnregisterSnapshot(snapshot); + heap_freetuple(tup); + + return info; +} + + +static void +FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo) +{ + if (collInfo.collation != NULL) + { + pfree(collInfo.collation); + } + if (collInfo.ctype != NULL) + { + pfree(collInfo.ctype); + } + if (collInfo.icu_locale != NULL) + { + pfree(collInfo.icu_locale); + } +} + + +/* + * GenerateCreateDatabaseStatementFromPgDatabase is gets the pg_database tuple and returns the CREATE DATABASE statement + */ +static char * +GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) +{ + DatabaseCollationInfo collInfo = GetDatabaseCollation(databaseForm->oid); + elog(LOG, "collInfo: %s %s %s %s", collInfo.collation, collInfo.ctype, + collInfo.icu_locale, collInfo.collversion); + + StringInfoData str; + initStringInfo(&str); + + appendStringInfo(&str, "CREATE DATABASE %s", quote_identifier(NameStr( + databaseForm-> + datname))); + + if (databaseForm->datdba != InvalidOid) + { + appendStringInfo(&str, " OWNER = %s", GetUserNameFromId(databaseForm->datdba, + false)); + } + + if (databaseForm->encoding != -1) + { + appendStringInfo(&str, " ENCODING = '%s'", pg_encoding_to_char( + databaseForm->encoding)); + } + + if (collInfo.collation != NULL) + { + appendStringInfo(&str, " LC_COLLATE = '%s'", collInfo.collation); + } + if (collInfo.ctype != NULL) + { + appendStringInfo(&str, " LC_CTYPE = '%s'", collInfo.ctype); + } + + if (collInfo.icu_locale != NULL) + { + appendStringInfo(&str, " ICU_LOCALE = '%s'", collInfo.icu_locale); + } + + if (databaseForm->datlocprovider != 0) + { + appendStringInfo(&str, " LOCALE_PROVIDER = '%c'", databaseForm->datlocprovider); + } + + if (collInfo.collversion != NULL) + { + appendStringInfo(&str, " COLLATION_VERSION = '%s'", collInfo.collversion); + } + + if (databaseForm->dattablespace != InvalidOid) + { + appendStringInfo(&str, " TABLESPACE = %s", quote_identifier(GetTablespaceName( + databaseForm-> + 
dattablespace))); + } + + appendStringInfo(&str, " ALLOW_CONNECTIONS = '%s'", databaseForm->datallowconn ? + "true" : "false"); + + if (databaseForm->datconnlimit >= 0) + { + appendStringInfo(&str, " CONNECTION LIMIT %d", databaseForm->datconnlimit); + } + + appendStringInfo(&str, " IS_TEMPLATE = '%s'", databaseForm->datistemplate ? "true" : + "false"); + + FreeDatabaseCollationInfo(collInfo); + + + return str.data; +} + + +/* + * GenerateCreateDatabaseCommandList is gets the pg_database tuples and returns the CREATE DATABASE statement list + * for all the databases in the cluster.citus_internal_database_command UDF is used to send the CREATE DATABASE + * statement to the workers since the CREATE DATABASE statement gives error in transaction context. + */ +List * +GenerateCreateDatabaseCommandList(void) +{ + List *commands = NIL; + HeapTuple tuple; + Relation pgDatabaseRel; + TableScanDesc scan; + + pgDatabaseRel = table_open(DatabaseRelationId, AccessShareLock); + scan = table_beginscan_catalog(pgDatabaseRel, 0, NULL); + + while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) + { + Form_pg_database databaseForm = (Form_pg_database) GETSTRUCT(tuple); + + char *createStmt = GenerateCreateDatabaseStatementFromPgDatabase(databaseForm); + + + StringInfo outerDbStmt; + outerDbStmt = makeStringInfo(); + + /* Generate the CREATE DATABASE statement */ + appendStringInfo(outerDbStmt, + "select pg_catalog.citus_internal_database_command( %s)", + quote_literal_cstr( + createStmt)); + + elog(LOG, "outerDbStmt: %s", outerDbStmt->data); + + /* Add the statement to the list of commands */ + commands = lappend(commands, outerDbStmt->data); + } + + heap_endscan(scan); + table_close(pgDatabaseRel, AccessShareLock); + + return commands; +} diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 40bdae0ea..54fa801ae 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ 
b/src/backend/distributed/metadata/metadata_sync.c @@ -4501,6 +4501,13 @@ PropagateNodeWideObjectsCommandList(void) /* collect all commands */ List *ddlCommands = NIL; + if (EnableCreateDatabasePropagation) + { + /* Get commands for database creation */ + List *createDatabaseCommands = GenerateCreateDatabaseCommandList(); + ddlCommands = list_concat(ddlCommands, createDatabaseCommands); + } + if (EnableAlterRoleSetPropagation) { /* diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 06dda53eb..a4f890f9e 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -244,6 +244,7 @@ extern List * DropDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +extern List * GenerateCreateDatabaseCommandList(void); /* domain.c - forward declarations */ From 473f6cbf05b4f8e0e7cc8383a79d24e9e4c54ade Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 08:37:46 +0300 Subject: [PATCH 31/60] Fixes metadata sync --- src/backend/distributed/commands/database.c | 21 +- .../distributed/commands/utility_hook.c | 2 +- .../create_drop_database_propagation.out | 202 +++++++----- src/test/regress/expected/pg15.out | 296 +++++++++++++----- .../sql/create_drop_database_propagation.sql | 161 ++++++---- src/test/regress/sql/pg15.sql | 230 ++++++++++---- 6 files changed, 642 insertions(+), 270 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 443c0f366..e7a8299ae 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -545,6 +545,23 @@ FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo) } + +static char *get_locale_provider_string(char datlocprovider) +{ + switch (datlocprovider) + { + case 'c': + return "libc"; + case 'i': + return "icu"; + case 'l': + return "locale"; 
+ default: + return ""; + } +} + + /* * GenerateCreateDatabaseStatementFromPgDatabase is gets the pg_database tuple and returns the CREATE DATABASE statement */ @@ -552,8 +569,6 @@ static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) { DatabaseCollationInfo collInfo = GetDatabaseCollation(databaseForm->oid); - elog(LOG, "collInfo: %s %s %s %s", collInfo.collation, collInfo.ctype, - collInfo.icu_locale, collInfo.collversion); StringInfoData str; initStringInfo(&str); @@ -590,7 +605,7 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) if (databaseForm->datlocprovider != 0) { - appendStringInfo(&str, " LOCALE_PROVIDER = '%c'", databaseForm->datlocprovider); + appendStringInfo(&str, " LOCALE_PROVIDER = '%s'", get_locale_provider_string(databaseForm->datlocprovider)); } if (collInfo.collversion != NULL) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index fc763f369..0d400d139 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -1490,7 +1490,7 @@ DDLTaskList(Oid relationId, const char *commandString) List * NontransactionalNodeDDLTask(TargetWorkerSet targets, List *commands) { - List *ddlJobs = NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + List *ddlJobs = NodeDDLTaskList(targets, commands); DDLJob *ddlJob = NULL; foreach_ptr(ddlJob, ddlJobs) { diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 088a445d6..78ce0025e 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -19,89 +19,143 @@ CREATE DATABASE mydatabase TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = true IS_TEMPLATE = false; -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, 
-pd.datcollate , pd. datctype , pd.datacl, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | tablespace +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- - mydatabase | 6 | f | t | 10 | C | C | | create_drop_db_test_user | create_drop_db_tablespace -(1 row) + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(3 rows) -\c - - - :worker_1_port -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | database_owner | tablespace ---------------------------------------------------------------------- - mydatabase | 6 | f | t | 10 | C | C | | create_drop_db_test_user | create_drop_db_test_user | create_drop_db_tablespace -(1 row) - -\c - - - :worker_2_port -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | database_owner | tablespace ---------------------------------------------------------------------- - mydatabase | 6 | f | t | 10 | C | C | | create_drop_db_test_user | create_drop_db_test_user | create_drop_db_tablespace -(1 row) - -\c - - - :master_port -set citus.enable_create_database_propagation=on; drop database mydatabase; -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | database_owner | tablespace +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- -(0 rows) -\c - - - :worker_1_port -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | database_owner | tablespace + + +(3 rows) + +-- test database syncing after node addition +select citus_remove_node('localhost', :worker_2_port); + citus_remove_node --------------------------------------------------------------------- -(0 rows) -\c - - - :worker_2_port -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datistemplate | datallowconn | datconnlimit | datcollate | datctype | datacl | database_owner | database_owner | tablespace +(1 row) + +--test with is_template true and allow connections false +CREATE DATABASE mydatabase + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LC_COLLATE = 'C' + LC_CTYPE = 'C' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- -(0 rows) + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(2 rows) + +select citus_add_node('localhost', :worker_2_port); + citus_add_node +--------------------------------------------------------------------- + 30 +(1 row) + 
+SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database mydatabase; +NOTICE: issuing DROP DATABASE mydatabase +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE mydatabase +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + +(3 rows) -\c - - - :master_port --tests for special characters in database name set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; diff --git a/src/test/regress/expected/pg15.out b/src/test/regress/expected/pg15.out index caee521a7..3ee5a15cf 100644 --- a/src/test/regress/expected/pg15.out +++ b/src/test/regress/expected/pg15.out @@ -1548,6 +1548,8 @@ CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace \c - - - :master_port create user create_drop_db_test_user; set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; CREATE DATABASE mydatabase WITH TEMPLATE = 'template0' OWNER = create_drop_db_test_user @@ -1564,87 +1566,231 @@ CREATE DATABASE mydatabase ALLOW_CONNECTIONS = true IS_TEMPLATE = false OID = 966345; -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datlocprovider | datistemplate | datallowconn | datconnlimit | datcollate | datctype | daticulocale | datcollversion | datacl | database_owner | database_owner | tablespace +NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- - mydatabase | 6 | i | f | t | 10 | C | C | und | 1.0 | | create_drop_db_test_user | create_drop_db_test_user | create_drop_db_tablespace -(1 row) + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(3 rows) -\c - - - :worker_1_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datlocprovider | datistemplate | datallowconn | datconnlimit | datcollate | datctype | daticulocale | datcollversion | datacl | database_owner | database_owner | tablespace ---------------------------------------------------------------------- - mydatabase | 6 | i | f | t | 10 | C | C | und | 1.0 | | create_drop_db_test_user | create_drop_db_test_user | create_drop_db_tablespace -(1 row) - -\c - - - :worker_2_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datlocprovider | datistemplate | datallowconn | datconnlimit | datcollate | datctype | daticulocale | datcollversion | datacl | database_owner | database_owner | tablespace ---------------------------------------------------------------------- - mydatabase | 6 | i | f | t | 10 | C | C | und | 1.0 | | create_drop_db_test_user | create_drop_db_test_user | create_drop_db_tablespace -(1 row) - -\c - - - :master_port -set citus.enable_create_database_propagation=on; drop database mydatabase; -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datlocprovider | datistemplate | datallowconn | datconnlimit | datcollate | datctype | daticulocale | datcollversion | datacl | database_owner | database_owner | tablespace +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result --------------------------------------------------------------------- -(0 rows) -\c - - - :worker_1_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datlocprovider | datistemplate | datallowconn | datconnlimit | datcollate | datctype | daticulocale | datcollversion | datacl | database_owner | database_owner | tablespace ---------------------------------------------------------------------- -(0 rows) -\c - - - :worker_2_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - datname | encoding | datlocprovider | datistemplate | datallowconn | datconnlimit | datcollate | datctype | daticulocale | datcollversion | datacl | database_owner | database_owner | tablespace + +(3 rows) + +select citus_remove_node('localhost', :worker_2_port); + citus_remove_node --------------------------------------------------------------------- -(0 rows) + +(1 row) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; +CREATE DATABASE mydatabase2 + WITH OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false + OID = 966345; +NOTICE: issuing CREATE DATABASE mydatabase2 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(2 rows) + +SET citus.log_remote_commands = true; +select citus_add_node('localhost', :worker_2_port); +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off';select pg_catalog.citus_internal_database_command( 'CREATE DATABASE postgres OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE regression OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template1 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template0 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default 
ALLOW_CONNECTIONS = ''false'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE mydatabase2 OWNER = create_drop_db_test_user ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' COLLATION_VERSION = ''1.0'' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');ALTER ROLE ALL IN DATABASE regression SET lc_messages = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_monetary = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_numeric = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_time = 'C';ALTER ROLE ALL IN DATABASE regression SET bytea_output = 'hex';ALTER ROLE ALL IN DATABASE regression SET timezone_abbreviations = 'Default';SET citus.enable_ddl_propagation TO 'on' + citus_add_node +--------------------------------------------------------------------- + 30 +(1 row) + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database mydatabase2; +NOTICE: issuing DROP DATABASE mydatabase2 +NOTICE: issuing DROP DATABASE mydatabase2 +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; +-- create a template database with all options set and allow connections false +CREATE DATABASE my_template_database + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'en-US' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = false + IS_TEMPLATE = true; +NOTICE: issuing CREATE DATABASE my_template_database TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true +NOTICE: issuing CREATE DATABASE my_template_database TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +SET citus.log_remote_commands = true; +--template databases could not be dropped so we need to change the template flag +SELECT result from run_command_on_all_nodes( + $$ + UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + UPDATE 1 + UPDATE 1 + UPDATE 1 +(3 rows) + +; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; +NOTICE: issuing DROP DATABASE my_template_database +NOTICE: issuing DROP DATABASE my_template_database +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, 
pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + +(3 rows) \c - - - :master_port drop tablespace create_drop_db_tablespace; @@ -1653,4 +1799,4 @@ drop tablespace create_drop_db_tablespace; \c - - - :worker_2_port drop tablespace create_drop_db_tablespace; \c - - - :master_port -drop user create_drop_db_test_user; \ No newline at end of file +drop user create_drop_db_test_user; diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index d21956595..6f9202a4b 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -16,6 +16,60 @@ create user create_drop_db_test_user; set citus.enable_create_database_propagation=on; + +CREATE DATABASE mydatabase + WITH OWNER = create_drop_db_test_user + TEMPLATE = 'template0' + ENCODING = 'UTF8' + CONNECTION LIMIT = 10 + LC_COLLATE = 'C' + LC_CTYPE = 'C' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false; + + + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + + +drop database mydatabase; + + + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + +-- test database syncing after node addition + +select citus_remove_node('localhost', :worker_2_port); + +--test with is_template true and allow connections false CREATE DATABASE mydatabase WITH TEMPLATE = 'template0' OWNER = create_drop_db_test_user @@ -24,75 +78,62 @@ CREATE DATABASE mydatabase LC_COLLATE = 'C' LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace - ALLOW_CONNECTIONS = true + ALLOW_CONNECTIONS = false IS_TEMPLATE = false; -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; -\c - - - :worker_1_port +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; -\c - - - :worker_2_port +select citus_add_node('localhost', :worker_2_port); -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; -\c - - - :master_port -set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; drop database mydatabase; -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; +SET citus.log_remote_commands = false; -\c - - - :worker_1_port - -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - -\c - - - :worker_2_port - -SELECT pd.datname, pd.encoding, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.datacl, rolname AS database_owner, -pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - -\c - - - :master_port +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; --tests for special characters in database name set citus.enable_create_database_propagation=on; diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql index f0c6706d2..ac523b521 100644 --- a/src/test/regress/sql/pg15.sql +++ b/src/test/regress/sql/pg15.sql @@ -995,6 +995,8 @@ CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace \c - - - :master_port create user create_drop_db_test_user; set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; CREATE DATABASE mydatabase WITH TEMPLATE = 'template0' OWNER = create_drop_db_test_user @@ -1012,70 +1014,184 @@ CREATE DATABASE mydatabase IS_TEMPLATE = false OID = 966345; -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - -\c - - - :worker_1_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. 
datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; -\c - - - :worker_2_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; - - -\c - - - :master_port -set citus.enable_create_database_propagation=on; drop database mydatabase; -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; -\c - - - :worker_1_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; +select citus_remove_node('localhost', :worker_2_port); -\c - - - :worker_2_port -SELECT pd.datname, pd.encoding, pd.datlocprovider, -pd.datistemplate, pd.datallowconn, pd.datconnlimit, -pd.datcollate , pd. datctype , pd.daticulocale, pd.datcollversion, -pd.datacl, rolname AS database_owner, pa.rolname AS database_owner, pt.spcname AS tablespace -FROM pg_database pd -JOIN pg_authid pa ON pd.datdba = pa.oid -join pg_tablespace pt on pd.dattablespace = pt.oid -WHERE datname = 'mydatabase'; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; + +CREATE DATABASE mydatabase2 + WITH OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false + OID = 966345; + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + + +SET citus.log_remote_commands = true; +select citus_add_node('localhost', :worker_2_port); + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database mydatabase2; + +SET citus.log_remote_commands = false; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; + +-- create a template database with all options set and allow connections false +CREATE DATABASE my_template_database + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'en-US' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = false + IS_TEMPLATE = true; + +SET citus.log_remote_commands = false; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + + + +SET citus.log_remote_commands = true; + +--template databases could not be dropped so we need to change the template flag +SELECT result from run_command_on_all_nodes( + $$ + UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' + $$ +) ORDER BY result; + +; + +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + \c - - - :master_port drop tablespace create_drop_db_tablespace; From a797584f1c2b53cf5d2254a3d446ad4bfd0286bd Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 09:07:19 +0300 Subject: [PATCH 32/60] Fixes tests --- src/backend/distributed/commands/database.c | 38 ++++++++++++------- .../create_drop_database_propagation.out | 6 +-- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index e7a8299ae..4e9528ffe 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -545,20 +545,29 @@ FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo) } - -static char *get_locale_provider_string(char datlocprovider) +static char 
* +get_locale_provider_string(char datlocprovider) { - switch (datlocprovider) - { - case 'c': - return "libc"; - case 'i': - return "icu"; - case 'l': - return "locale"; - default: - return ""; - } + switch (datlocprovider) + { + case 'c': + { + return "libc"; + } + + case 'i': + { + return "icu"; + } + + case 'l': + { + return "locale"; + } + + default: + return ""; + } } @@ -605,7 +614,8 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) if (databaseForm->datlocprovider != 0) { - appendStringInfo(&str, " LOCALE_PROVIDER = '%s'", get_locale_provider_string(databaseForm->datlocprovider)); + appendStringInfo(&str, " LOCALE_PROVIDER = '%s'", get_locale_provider_string( + databaseForm->datlocprovider)); } if (collInfo.collversion != NULL) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 78ce0025e..e05616a42 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -10,10 +10,10 @@ CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace create user create_drop_db_test_user; set citus.enable_create_database_propagation=on; CREATE DATABASE mydatabase - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user - CONNECTION LIMIT = 10 + WITH OWNER = create_drop_db_test_user + TEMPLATE = 'template0' ENCODING = 'UTF8' + CONNECTION LIMIT = 10 LC_COLLATE = 'C' LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace From 569f4db60c0c8044c2bc8af70bb7465d1a81b4e3 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 09:51:15 +0300 Subject: [PATCH 33/60] Fixes pg 14 compile warnings --- src/backend/distributed/commands/database.c | 22 +++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 
4e9528ffe..cb32231f1 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -451,8 +451,10 @@ typedef struct DatabaseCollationInfo { char *collation; char *ctype; + #if PG_VERSION_NUM >= PG_VERSION_15 char *icu_locale; char *collversion; + #endif } DatabaseCollationInfo; /* @@ -462,17 +464,15 @@ typedef struct DatabaseCollationInfo static DatabaseCollationInfo GetDatabaseCollation(Oid db_oid) { - HeapTuple tup; DatabaseCollationInfo info; Datum collationDatum, ctypeDatum, icuLocaleDatum, collverDatum; bool isNull; - Relation rel; TupleDesc tupdesc; Snapshot snapshot; snapshot = RegisterSnapshot(GetLatestSnapshot()); - rel = table_open(DatabaseRelationId, AccessShareLock); - tup = get_catalog_object_by_oid(rel, Anum_pg_database_oid, db_oid); + Relation rel = table_open(DatabaseRelationId, AccessShareLock); + HeapTuple tup = get_catalog_object_by_oid(rel, Anum_pg_database_oid, db_oid); if (!HeapTupleIsValid(tup)) { elog(ERROR, "cache lookup failed for database %u", db_oid); @@ -499,6 +499,8 @@ GetDatabaseCollation(Oid db_oid) info.ctype = TextDatumGetCString(ctypeDatum); } + #if PG_VERSION_NUM >= PG_VERSION_15 + icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc, &isNull); if (isNull) { @@ -518,6 +520,7 @@ GetDatabaseCollation(Oid db_oid) { info.collversion = TextDatumGetCString(collverDatum); } + #endif table_close(rel, AccessShareLock); UnregisterSnapshot(snapshot); @@ -607,6 +610,7 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) appendStringInfo(&str, " LC_CTYPE = '%s'", collInfo.ctype); } + #if PG_VERSION_NUM >= PG_VERSION_15 if (collInfo.icu_locale != NULL) { appendStringInfo(&str, " ICU_LOCALE = '%s'", collInfo.icu_locale); @@ -622,6 +626,7 @@ GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) { appendStringInfo(&str, " COLLATION_VERSION = '%s'", collInfo.collversion); } + #endif if (databaseForm->dattablespace != InvalidOid) { 
@@ -658,11 +663,9 @@ GenerateCreateDatabaseCommandList(void) { List *commands = NIL; HeapTuple tuple; - Relation pgDatabaseRel; - TableScanDesc scan; - pgDatabaseRel = table_open(DatabaseRelationId, AccessShareLock); - scan = table_beginscan_catalog(pgDatabaseRel, 0, NULL); + Relation pgDatabaseRel = table_open(DatabaseRelationId, AccessShareLock); + TableScanDesc scan = table_beginscan_catalog(pgDatabaseRel, 0, NULL); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -671,8 +674,7 @@ GenerateCreateDatabaseCommandList(void) char *createStmt = GenerateCreateDatabaseStatementFromPgDatabase(databaseForm); - StringInfo outerDbStmt; - outerDbStmt = makeStringInfo(); + StringInfo outerDbStmt = makeStringInfo(); /* Generate the CREATE DATABASE statement */ appendStringInfo(outerDbStmt, From fe26aebf6cb0f50cc25f206d948f722fbbacf23a Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 09:52:58 +0300 Subject: [PATCH 34/60] Fixes useless declarations --- src/backend/distributed/commands/database.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index cb32231f1..33b8f6a51 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -467,10 +467,8 @@ GetDatabaseCollation(Oid db_oid) DatabaseCollationInfo info; Datum collationDatum, ctypeDatum, icuLocaleDatum, collverDatum; bool isNull; - TupleDesc tupdesc; - Snapshot snapshot; - snapshot = RegisterSnapshot(GetLatestSnapshot()); + Snapshot snapshot = RegisterSnapshot(GetLatestSnapshot()); Relation rel = table_open(DatabaseRelationId, AccessShareLock); HeapTuple tup = get_catalog_object_by_oid(rel, Anum_pg_database_oid, db_oid); if (!HeapTupleIsValid(tup)) @@ -478,7 +476,7 @@ GetDatabaseCollation(Oid db_oid) elog(ERROR, "cache lookup failed for database %u", db_oid); } - tupdesc = RelationGetDescr(rel); + TupleDesc tupdesc = 
RelationGetDescr(rel); collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc, &isNull); if (isNull) { From 2a6c40d6436a51189afbd55d007756bf3ee86c4c Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 10:10:42 +0300 Subject: [PATCH 35/60] Fixes flaky tests --- .../expected/create_drop_database_propagation.out | 12 ++++++------ src/test/regress/expected/pg15.out | 12 ++++++------ .../regress/sql/create_drop_database_propagation.sql | 4 ++-- src/test/regress/sql/pg15.sql | 4 ++-- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index e05616a42..9f98c740c 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -63,10 +63,10 @@ SELECT result from run_command_on_all_nodes( (3 rows) -- test database syncing after node addition -select citus_remove_node('localhost', :worker_2_port); - citus_remove_node +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) --test with is_template true and allow connections false @@ -100,10 +100,10 @@ SELECT result from run_command_on_all_nodes( [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] (2 rows) -select citus_add_node('localhost', :worker_2_port); - citus_add_node +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
--------------------------------------------------------------------- - 30 + 1 (1 row) SELECT result from run_command_on_all_nodes( diff --git a/src/test/regress/expected/pg15.out b/src/test/regress/expected/pg15.out index 3ee5a15cf..e58387551 100644 --- a/src/test/regress/expected/pg15.out +++ b/src/test/regress/expected/pg15.out @@ -1611,10 +1611,10 @@ SELECT result from run_command_on_all_nodes( (3 rows) -select citus_remove_node('localhost', :worker_2_port); - citus_remove_node +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? --------------------------------------------------------------------- - + 1 (1 row) SET citus.log_remote_commands = true; @@ -1654,11 +1654,11 @@ SELECT result from run_command_on_all_nodes( (2 rows) SET citus.log_remote_commands = true; -select citus_add_node('localhost', :worker_2_port); +select 1 from citus_add_node('localhost', :worker_2_port); NOTICE: issuing SET citus.enable_ddl_propagation TO 'off';select pg_catalog.citus_internal_database_command( 'CREATE DATABASE postgres OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE regression OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template1 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template0 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''false'' IS_TEMPLATE = 
''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE mydatabase2 OWNER = create_drop_db_test_user ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' COLLATION_VERSION = ''1.0'' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');ALTER ROLE ALL IN DATABASE regression SET lc_messages = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_monetary = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_numeric = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_time = 'C';ALTER ROLE ALL IN DATABASE regression SET bytea_output = 'hex';ALTER ROLE ALL IN DATABASE regression SET timezone_abbreviations = 'Default';SET citus.enable_ddl_propagation TO 'on' - citus_add_node + ?column? --------------------------------------------------------------------- - 30 + 1 (1 row) SET citus.log_remote_commands = false; diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index 6f9202a4b..6b95fd83e 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -67,7 +67,7 @@ SELECT result from run_command_on_all_nodes( -- test database syncing after node addition -select citus_remove_node('localhost', :worker_2_port); +select 1 from citus_remove_node('localhost', :worker_2_port); --test with is_template true and allow connections false CREATE DATABASE mydatabase @@ -97,7 +97,7 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; -select citus_add_node('localhost', :worker_2_port); +select 1 from citus_add_node('localhost', :worker_2_port); SELECT result from run_command_on_all_nodes( $$ diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql index ac523b521..845d5fab5 100644 --- a/src/test/regress/sql/pg15.sql +++ b/src/test/regress/sql/pg15.sql @@ -1048,7 +1048,7 @@ SELECT result from run_command_on_all_nodes( ) 
ORDER BY result; -select citus_remove_node('localhost', :worker_2_port); +select 1 from citus_remove_node('localhost', :worker_2_port); SET citus.log_remote_commands = true; @@ -1085,7 +1085,7 @@ SELECT result from run_command_on_all_nodes( SET citus.log_remote_commands = true; -select citus_add_node('localhost', :worker_2_port); +select 1 from citus_add_node('localhost', :worker_2_port); SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( From 8811d910a5472fddf2bd1e4f7f40f1e0ff9761b5 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 10:28:09 +0300 Subject: [PATCH 36/60] Fixes pg 14 compile errors --- src/backend/distributed/commands/database.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 33b8f6a51..c33afb77e 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -465,7 +465,6 @@ static DatabaseCollationInfo GetDatabaseCollation(Oid db_oid) { DatabaseCollationInfo info; - Datum collationDatum, ctypeDatum, icuLocaleDatum, collverDatum; bool isNull; Snapshot snapshot = RegisterSnapshot(GetLatestSnapshot()); @@ -477,7 +476,7 @@ GetDatabaseCollation(Oid db_oid) } TupleDesc tupdesc = RelationGetDescr(rel); - collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc, &isNull); + Datum collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc, &isNull); if (isNull) { info.collation = NULL; @@ -487,7 +486,7 @@ GetDatabaseCollation(Oid db_oid) info.collation = TextDatumGetCString(collationDatum); } - ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull); + Datum ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull); if (isNull) { info.ctype = NULL; @@ -499,7 +498,7 @@ GetDatabaseCollation(Oid db_oid) #if PG_VERSION_NUM >= PG_VERSION_15 - icuLocaleDatum = heap_getattr(tup, 
Anum_pg_database_daticulocale, tupdesc, &isNull); + Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc, &isNull); if (isNull) { info.icu_locale = NULL; @@ -509,7 +508,7 @@ GetDatabaseCollation(Oid db_oid) info.icu_locale = TextDatumGetCString(icuLocaleDatum); } - collverDatum = heap_getattr(tup, Anum_pg_database_datcollversion, tupdesc, &isNull); + Datum collverDatum = heap_getattr(tup, Anum_pg_database_datcollversion, tupdesc, &isNull); if (isNull) { info.collversion = NULL; @@ -539,13 +538,15 @@ FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo) { pfree(collInfo.ctype); } + #if PG_VERSION_NUM >= PG_VERSION_15 if (collInfo.icu_locale != NULL) { pfree(collInfo.icu_locale); } + #endif } - +#if PG_VERSION_NUM >= PG_VERSION_15 static char * get_locale_provider_string(char datlocprovider) { @@ -570,6 +571,7 @@ get_locale_provider_string(char datlocprovider) return ""; } } +#endif /* From b41feadd97d3c7375a17bbe0b56dd43a8ae8dd86 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 10:39:16 +0300 Subject: [PATCH 37/60] Fixes indent --- src/backend/distributed/commands/database.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index c33afb77e..42ef2ed43 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -476,7 +476,8 @@ GetDatabaseCollation(Oid db_oid) } TupleDesc tupdesc = RelationGetDescr(rel); - Datum collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc, &isNull); + Datum collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc, + &isNull); if (isNull) { info.collation = NULL; @@ -498,7 +499,8 @@ GetDatabaseCollation(Oid db_oid) #if PG_VERSION_NUM >= PG_VERSION_15 - Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc, &isNull); + Datum icuLocaleDatum = heap_getattr(tup, 
Anum_pg_database_daticulocale, tupdesc, + &isNull); if (isNull) { info.icu_locale = NULL; @@ -508,7 +510,8 @@ GetDatabaseCollation(Oid db_oid) info.icu_locale = TextDatumGetCString(icuLocaleDatum); } - Datum collverDatum = heap_getattr(tup, Anum_pg_database_datcollversion, tupdesc, &isNull); + Datum collverDatum = heap_getattr(tup, Anum_pg_database_datcollversion, tupdesc, + &isNull); if (isNull) { info.collversion = NULL; @@ -546,6 +549,7 @@ FreeDatabaseCollationInfo(DatabaseCollationInfo collInfo) #endif } + #if PG_VERSION_NUM >= PG_VERSION_15 static char * get_locale_provider_string(char datlocprovider) @@ -571,6 +575,8 @@ get_locale_provider_string(char datlocprovider) return ""; } } + + #endif From b28210c04fe5445807f9d5eac9b8fd9566fbc343 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 11:03:05 +0300 Subject: [PATCH 38/60] Adds shard selects to debug --- src/test/regress/sql/pg15.sql | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql index 845d5fab5..109a2da41 100644 --- a/src/test/regress/sql/pg15.sql +++ b/src/test/regress/sql/pg15.sql @@ -1047,8 +1047,11 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; +select * from pg_dist_partition; +select * from pg_dist_placement; +select * from pg_dist_shard; -select 1 from citus_remove_node('localhost', :worker_2_port); +select citus_remove_node('localhost', :worker_2_port); SET citus.log_remote_commands = true; From 3cb5cab8cd714a3ab642c8f44d644cfff777e193 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 14:01:23 +0300 Subject: [PATCH 39/60] Fixes test problem --- src/test/regress/multi_1_schedule | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 6cd6225ef..2f664409a 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -33,6 +33,8 @@ test: 
ref_citus_local_fkeys test: alter_database_owner test: distributed_triggers test: create_single_shard_table +test: create_drop_database_propagation + # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs test: schema_based_sharding @@ -52,7 +54,6 @@ test: multi_read_from_secondaries test: grant_on_database_propagation test: alter_database_propagation -test: create_drop_database_propagation test: citus_shards From b4dec6eee8ebcfe53add2fa9b014a6888c6b128b Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 14:15:43 +0300 Subject: [PATCH 40/60] Fixes test --- src/test/regress/sql/pg15.sql | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql index 109a2da41..57ecae410 100644 --- a/src/test/regress/sql/pg15.sql +++ b/src/test/regress/sql/pg15.sql @@ -1047,10 +1047,6 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; -select * from pg_dist_partition; -select * from pg_dist_placement; -select * from pg_dist_shard; - select citus_remove_node('localhost', :worker_2_port); From 66b433310c7f090d3af8e5b13570deafd8bf6ece Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 14:46:41 +0300 Subject: [PATCH 41/60] Moves pg15 tests to fix errors --- src/test/regress/multi_schedule | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 65a272566..56d61e295 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -1,5 +1,6 @@ test: multi_test_helpers multi_test_helpers_superuser test: multi_cluster_management +test: pg15 test: create_role_propagation test: multi_create_fdw test: multi_test_catalog_views @@ -63,7 +64,6 @@ test: cte_inline recursive_view_local_table values sequences_with_different_type test: pg13 pg12 # run pg14 sequentially as it syncs metadata test: pg14 -test: pg15 test: pg15_jsonpath 
detect_conn_close test: pg16 test: drop_column_partitioned_table From 93dab802069ebe879dbac4a582b65cb2716b531c Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 15:33:55 +0300 Subject: [PATCH 42/60] Moves pg15 tests --- src/test/regress/multi_1_schedule | 1 + src/test/regress/sql/pg15.sql | 227 ----------------- .../pg15_create_drop_database_propagation.sql | 235 ++++++++++++++++++ 3 files changed, 236 insertions(+), 227 deletions(-) create mode 100644 src/test/regress/sql/pg15_create_drop_database_propagation.sql diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 2f664409a..766a1bc73 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -34,6 +34,7 @@ test: alter_database_owner test: distributed_triggers test: create_single_shard_table test: create_drop_database_propagation +test: pg15_create_drop_database_propagation # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql index 57ecae410..fe60222dd 100644 --- a/src/test/regress/sql/pg15.sql +++ b/src/test/regress/sql/pg15.sql @@ -976,230 +976,3 @@ SET client_min_messages TO ERROR; DROP SCHEMA pg15 CASCADE; DROP ROLE rls_tenant_1; DROP ROLE rls_tenant_2; - - --- create/drop database for pg > 15 - - -\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' -CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; - -\c - - - :worker_1_port -\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4' -CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; - -\c - - - :worker_2_port -\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5' -CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; - -\c - - - :master_port -create user create_drop_db_test_user; -set citus.enable_create_database_propagation=on; -SET 
citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; -CREATE DATABASE mydatabase - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user - CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = '' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'und' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' - TABLESPACE = create_drop_db_tablespace - ALLOW_CONNECTIONS = true - IS_TEMPLATE = false - OID = 966345; - -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase' - ) q2 - $$ -) ORDER BY result; - - -drop database mydatabase; - -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase' - ) q2 - $$ -) ORDER BY result; - -select citus_remove_node('localhost', :worker_2_port); - - -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; - -CREATE DATABASE mydatabase2 - WITH OWNER = create_drop_db_test_user - ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - COLLATION_VERSION = '1.0' - TABLESPACE = create_drop_db_tablespace - ALLOW_CONNECTIONS = true - IS_TEMPLATE = false - OID = 966345; - -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase2' - ) q2 - $$ -) ORDER BY result; - - -SET citus.log_remote_commands = true; -select 1 from citus_add_node('localhost', :worker_2_port); - -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase2' - ) q2 - $$ -) ORDER BY result; - -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database mydatabase2; - -SET citus.log_remote_commands = false; - -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase' - ) q2 - $$ -) ORDER BY result; - -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; - --- create a template database with all options set and allow connections false -CREATE DATABASE my_template_database - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user - ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'en-US' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' - TABLESPACE = create_drop_db_tablespace - ALLOW_CONNECTIONS = false - IS_TEMPLATE = true; - -SET citus.log_remote_commands = false; - -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'my_template_database' - ) q2 - $$ -) ORDER BY result; - - - -SET citus.log_remote_commands = true; - ---template databases could not be dropped so we need to change the template flag -SELECT result from run_command_on_all_nodes( - $$ - UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' - $$ -) ORDER BY result; - -; - -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database my_template_database; - -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'my_template_database' - ) q2 - $$ -) ORDER BY result; - - -\c - - - :master_port -drop tablespace create_drop_db_tablespace; - -\c - - - :worker_1_port -drop tablespace create_drop_db_tablespace; - -\c - - - :worker_2_port -drop tablespace create_drop_db_tablespace; - -\c - - - :master_port -drop user create_drop_db_test_user; diff --git a/src/test/regress/sql/pg15_create_drop_database_propagation.sql b/src/test/regress/sql/pg15_create_drop_database_propagation.sql new file mode 100644 index 000000000..4fce5f9db --- /dev/null +++ b/src/test/regress/sql/pg15_create_drop_database_propagation.sql @@ -0,0 +1,235 @@ +-- +-- PG15 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q + +-- create/drop database for pg > 15 + + +\set 
create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; + +\c - - - :worker_1_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; + +\c - - - :worker_2_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; + +\c - - - :master_port +create user create_drop_db_test_user; +set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; +CREATE DATABASE mydatabase + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = '' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'und' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false + OID = 966345; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + + +drop database mydatabase; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + +select citus_remove_node('localhost', :worker_2_port); + + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; + +CREATE DATABASE mydatabase2 + WITH OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false + OID = 966345; + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + + +SET citus.log_remote_commands = true; +select 1 from citus_add_node('localhost', :worker_2_port); + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database mydatabase2; + +SET citus.log_remote_commands = false; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; + +-- create a template database with all options set and allow connections false +CREATE DATABASE my_template_database + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'en-US' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = false + IS_TEMPLATE = true; + +SET citus.log_remote_commands = false; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + + + +SET citus.log_remote_commands = true; + +--template databases could not be dropped so we need to change the template flag +SELECT result from run_command_on_all_nodes( + $$ + UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' + $$ +) ORDER BY result; + +; + +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + + +\c - - - :master_port +drop tablespace create_drop_db_tablespace; + +\c - - - :worker_1_port +drop tablespace create_drop_db_tablespace; + +\c - - - :worker_2_port +drop tablespace create_drop_db_tablespace; + +\c - - - :master_port +drop user create_drop_db_test_user; From d323ab8c51ce74de3f685dbcf000ada06707208d Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 15:44:22 +0300 Subject: [PATCH 43/60] Fixes pg15 sql --- src/test/regress/multi_schedule | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 56d61e295..65a272566 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -1,6 +1,5 @@ test: multi_test_helpers multi_test_helpers_superuser test: 
multi_cluster_management -test: pg15 test: create_role_propagation test: multi_create_fdw test: multi_test_catalog_views @@ -64,6 +63,7 @@ test: cte_inline recursive_view_local_table values sequences_with_different_type test: pg13 pg12 # run pg14 sequentially as it syncs metadata test: pg14 +test: pg15 test: pg15_jsonpath detect_conn_close test: pg16 test: drop_column_partitioned_table From 736894472e653bf60d3afab919defa2346fb44de Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 16:02:09 +0300 Subject: [PATCH 44/60] Fixes pg15 tests --- src/test/regress/expected/pg15.out | 264 ---------------- .../pg15_create_drop_database_propagation.out | 284 ++++++++++++++++++ .../pg15_create_drop_database_propagation.sql | 1 + 3 files changed, 285 insertions(+), 264 deletions(-) create mode 100644 src/test/regress/expected/pg15_create_drop_database_propagation.out diff --git a/src/test/regress/expected/pg15.out b/src/test/regress/expected/pg15.out index e58387551..fcbb0cd12 100644 --- a/src/test/regress/expected/pg15.out +++ b/src/test/regress/expected/pg15.out @@ -1536,267 +1536,3 @@ SET client_min_messages TO ERROR; DROP SCHEMA pg15 CASCADE; DROP ROLE rls_tenant_1; DROP ROLE rls_tenant_2; --- create/drop database for pg > 15 -\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' -CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; -\c - - - :worker_1_port -\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4' -CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; -\c - - - :worker_2_port -\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5' -CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; -\c - - - :master_port -create user create_drop_db_test_user; -set citus.enable_create_database_propagation=on; -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; -CREATE DATABASE mydatabase - WITH TEMPLATE = 'template0' - 
OWNER = create_drop_db_test_user - CONNECTION LIMIT = 10 - ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = '' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'und' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' - TABLESPACE = create_drop_db_tablespace - ALLOW_CONNECTIONS = true - IS_TEMPLATE = false - OID = 966345; -NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 -NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] -(3 rows) - -drop database mydatabase; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - - - -(3 rows) - -select 1 from citus_remove_node('localhost', :worker_2_port); - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; -CREATE DATABASE mydatabase2 - WITH OWNER = create_drop_db_test_user - ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - COLLATION_VERSION = '1.0' - TABLESPACE = create_drop_db_tablespace - ALLOW_CONNECTIONS = true - IS_TEMPLATE = false - OID = 966345; -NOTICE: issuing CREATE DATABASE mydatabase2 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase2' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] -(2 rows) - -SET citus.log_remote_commands = true; -select 1 from citus_add_node('localhost', :worker_2_port); -NOTICE: issuing SET citus.enable_ddl_propagation TO 'off';select pg_catalog.citus_internal_database_command( 'CREATE DATABASE postgres OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE regression OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template1 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template0 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default 
ALLOW_CONNECTIONS = ''false'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE mydatabase2 OWNER = create_drop_db_test_user ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' COLLATION_VERSION = ''1.0'' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');ALTER ROLE ALL IN DATABASE regression SET lc_messages = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_monetary = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_numeric = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_time = 'C';ALTER ROLE ALL IN DATABASE regression SET bytea_output = 'hex';ALTER ROLE ALL IN DATABASE regression SET timezone_abbreviations = 'Default';SET citus.enable_ddl_propagation TO 'on' - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase2' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] -(3 rows) - -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database mydatabase2; -NOTICE: issuing DROP DATABASE mydatabase2 -NOTICE: issuing DROP DATABASE mydatabase2 -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'mydatabase' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - - - -(3 rows) - -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%CREATE DATABASE%'; --- create a template database with all options set and allow connections false -CREATE DATABASE my_template_database - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user - ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'en-US' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' - TABLESPACE = create_drop_db_tablespace - ALLOW_CONNECTIONS = false - IS_TEMPLATE = true; -NOTICE: issuing CREATE DATABASE my_template_database TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true -NOTICE: issuing CREATE DATABASE my_template_database TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'my_template_database' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] -(3 rows) - -SET citus.log_remote_commands = true; ---template databases could not be dropped so we need to change the template flag -SELECT result from run_command_on_all_nodes( - $$ - UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - UPDATE 1 - UPDATE 1 - UPDATE 1 -(3 rows) - -; -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database my_template_database; -NOTICE: issuing DROP DATABASE my_template_database -NOTICE: issuing DROP DATABASE my_template_database -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, 
pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'my_template_database' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - - - -(3 rows) - -\c - - - :master_port -drop tablespace create_drop_db_tablespace; -\c - - - :worker_1_port -drop tablespace create_drop_db_tablespace; -\c - - - :worker_2_port -drop tablespace create_drop_db_tablespace; -\c - - - :master_port -drop user create_drop_db_test_user; diff --git a/src/test/regress/expected/pg15_create_drop_database_propagation.out b/src/test/regress/expected/pg15_create_drop_database_propagation.out new file mode 100644 index 000000000..4c8731421 --- /dev/null +++ b/src/test/regress/expected/pg15_create_drop_database_propagation.out @@ -0,0 +1,284 @@ +-- +-- PG15 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif +-- create/drop database for pg > 15 +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; +\c - - - :worker_1_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; +\c - - - :worker_2_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; +\c - - - :master_port +create user create_drop_db_test_user; +set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; +CREATE DATABASE mydatabase + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + 
CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = '' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'und' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false + OID = 966345; +NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +drop database mydatabase; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + +(3 rows) + +select citus_remove_node('localhost', :worker_2_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; +CREATE DATABASE mydatabase2 + WITH OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false + OID = 966345; +NOTICE: issuing CREATE DATABASE mydatabase2 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(2 rows) + +SET citus.log_remote_commands = true; +select 1 from citus_add_node('localhost', :worker_2_port); +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off';select pg_catalog.citus_internal_database_command( 'CREATE DATABASE postgres OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE regression OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template1 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template0 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default 
ALLOW_CONNECTIONS = ''false'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE mydatabase2 OWNER = create_drop_db_test_user ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' COLLATION_VERSION = ''1.0'' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');ALTER ROLE ALL IN DATABASE regression SET lc_messages = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_monetary = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_numeric = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_time = 'C';ALTER ROLE ALL IN DATABASE regression SET bytea_output = 'hex';ALTER ROLE ALL IN DATABASE regression SET timezone_abbreviations = 'Default';SET citus.enable_ddl_propagation TO 'on' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase2' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database mydatabase2; +NOTICE: issuing DROP DATABASE mydatabase2 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE mydatabase2 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'mydatabase' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; +-- create a template database with all options set and allow connections false +CREATE DATABASE my_template_database + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'en-US' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = false + IS_TEMPLATE = true; +NOTICE: issuing CREATE DATABASE my_template_database TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing CREATE DATABASE my_template_database TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +SET citus.log_remote_commands = true; +--template databases could not be dropped so we need to change the template flag +SELECT result from run_command_on_all_nodes( + $$ + UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + UPDATE 1 + UPDATE 1 + UPDATE 1 +(3 rows) + +; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; +NOTICE: issuing DROP DATABASE my_template_database +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE my_template_database +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT 
jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + +(3 rows) + +\c - - - :master_port +drop tablespace create_drop_db_tablespace; +\c - - - :worker_1_port +drop tablespace create_drop_db_tablespace; +\c - - - :worker_2_port +drop tablespace create_drop_db_tablespace; +\c - - - :master_port +drop user create_drop_db_test_user; diff --git a/src/test/regress/sql/pg15_create_drop_database_propagation.sql b/src/test/regress/sql/pg15_create_drop_database_propagation.sql index 4fce5f9db..3c7ac417e 100644 --- a/src/test/regress/sql/pg15_create_drop_database_propagation.sql +++ b/src/test/regress/sql/pg15_create_drop_database_propagation.sql @@ -7,6 +7,7 @@ SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 \if :server_version_ge_15 \else \q +\endif -- create/drop database for pg > 15 From a0da426a78d876e58dcbbb085b0e2ad01fa2ad37 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 16:51:31 +0300 Subject: [PATCH 45/60] Fixes tests --- .../create_drop_database_propagation.out | 109 ++++++++++++++++++ .../pg15_create_drop_database_propagation.out | 53 ++++++++- .../sql/create_drop_database_propagation.sql | 88 +++++++++++++- .../pg15_create_drop_database_propagation.sql | 37 +++++- 4 files changed, 279 insertions(+), 8 deletions(-) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 9f98c740c..add4ddac7 100644 --- 
a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -1,3 +1,6 @@ +-- test for create/drop database propagation +-- This test is only executes for Postgres 14 +-- For postgres 15 tests, pg15_create_drop_database_propagation.sql is used \set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; \c - - - :worker_1_port @@ -154,6 +157,112 @@ SELECT result from run_command_on_all_nodes( +(3 rows) + +-- create a template database with all options set and allow connections false +CREATE DATABASE my_template_database + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'en-US' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = false + IS_TEMPLATE = true; +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; +ERROR: cannot drop a template database +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] +(3 rows) + +SET citus.log_remote_commands = true; +--template databases could not be dropped so we need to change the template flag +SELECT result from run_command_on_all_nodes( + $$ + UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + UPDATE 1 + UPDATE 1 + UPDATE 1 +(3 rows) + +; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; +NOTICE: issuing DROP DATABASE my_template_database +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE my_template_database +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT 
jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + (3 rows) --tests for special characters in database name diff --git a/src/test/regress/expected/pg15_create_drop_database_propagation.out b/src/test/regress/expected/pg15_create_drop_database_propagation.out index 4c8731421..e1073f980 100644 --- a/src/test/regress/expected/pg15_create_drop_database_propagation.out +++ b/src/test/regress/expected/pg15_create_drop_database_propagation.out @@ -42,6 +42,7 @@ NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( $$ SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( @@ -63,7 +64,14 @@ SELECT result from run_command_on_all_nodes( [{"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": 10, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] (3 rows) +SET citus.log_remote_commands = true; +set 
citus.grep_remote_commands = '%DROP DATABASE%'; drop database mydatabase; +NOTICE: issuing DROP DATABASE mydatabase +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE mydatabase +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( $$ SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( @@ -128,16 +136,12 @@ SELECT result from run_command_on_all_nodes( [{"datacl": null, "datname": "mydatabase2", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": true, "datconnlimit": -1, "datistemplate": false, "database_owner": "create_drop_db_test_user"}] (2 rows) -SET citus.log_remote_commands = true; select 1 from citus_add_node('localhost', :worker_2_port); -NOTICE: issuing SET citus.enable_ddl_propagation TO 'off';select pg_catalog.citus_internal_database_command( 'CREATE DATABASE postgres OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE regression OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template1 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE DATABASE template0 OWNER = postgres ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' TABLESPACE = pg_default ALLOW_CONNECTIONS = ''false'' IS_TEMPLATE = ''true''');select pg_catalog.citus_internal_database_command( 'CREATE 
DATABASE mydatabase2 OWNER = create_drop_db_test_user ENCODING = ''UTF8'' LC_COLLATE = ''C'' LC_CTYPE = ''C'' LOCALE_PROVIDER = ''libc'' COLLATION_VERSION = ''1.0'' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = ''true'' IS_TEMPLATE = ''false''');ALTER ROLE ALL IN DATABASE regression SET lc_messages = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_monetary = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_numeric = 'C';ALTER ROLE ALL IN DATABASE regression SET lc_time = 'C';ALTER ROLE ALL IN DATABASE regression SET bytea_output = 'hex';ALTER ROLE ALL IN DATABASE regression SET timezone_abbreviations = 'Default';SET citus.enable_ddl_propagation TO 'on' -DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx ?column? --------------------------------------------------------------------- 1 (1 row) -SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( $$ SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( @@ -231,6 +235,32 @@ SELECT result from run_command_on_all_nodes( [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] (3 rows) +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; +ERROR: cannot drop a template database +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] +(3 rows) + SET citus.log_remote_commands = true; --template databases could not be dropped so we need to change the template flag SELECT result from run_command_on_all_nodes( @@ -274,6 +304,21 @@ SELECT result from run_command_on_all_nodes( (3 rows) +--tests for special characters in database name +set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; +create database "mydatabase#1'2"; +NOTICE: issuing CREATE DATABASE "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing CREATE DATABASE "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database if exists "mydatabase#1'2"; +NOTICE: issuing DROP DATABASE IF EXISTS 
"mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx \c - - - :master_port drop tablespace create_drop_db_tablespace; \c - - - :worker_1_port diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index 6b95fd83e..d7174808e 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -1,5 +1,7 @@ - +-- test for create/drop database propagation +-- This test is only executes for Postgres 14 +-- For postgres 15 tests, pg15_create_drop_database_propagation.sql is used \set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; @@ -135,6 +137,90 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; +-- create a template database with all options set and allow connections false +CREATE DATABASE my_template_database + WITH TEMPLATE = 'template0' + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + STRATEGY = 'wal_log' + LOCALE = 'en_US.utf8' + LC_COLLATE = 'POSIX' + LC_CTYPE = 'POSIX' + ICU_LOCALE = 'en-US' + LOCALE_PROVIDER = 'icu' + COLLATION_VERSION = '1.0' + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = false + IS_TEMPLATE = true; + +SET citus.log_remote_commands = false; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; + +SET citus.log_remote_commands = false; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + +SET citus.log_remote_commands = true; + +--template databases could not be dropped so we need to change the template flag +SELECT result from run_command_on_all_nodes( + $$ + UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' + $$ +) ORDER BY result; + +; + +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; + +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. 
datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; + --tests for special characters in database name set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; diff --git a/src/test/regress/sql/pg15_create_drop_database_propagation.sql b/src/test/regress/sql/pg15_create_drop_database_propagation.sql index 3c7ac417e..3a8e80ebf 100644 --- a/src/test/regress/sql/pg15_create_drop_database_propagation.sql +++ b/src/test/regress/sql/pg15_create_drop_database_propagation.sql @@ -45,6 +45,8 @@ CREATE DATABASE mydatabase IS_TEMPLATE = false OID = 966345; +SET citus.log_remote_commands = false; + SELECT result from run_command_on_all_nodes( $$ SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( @@ -60,9 +62,11 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; - +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; drop database mydatabase; +SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( $$ SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( @@ -114,10 +118,8 @@ SELECT result from run_command_on_all_nodes( ) ORDER BY result; -SET citus.log_remote_commands = true; select 1 from citus_add_node('localhost', :worker_2_port); -SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( $$ SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( @@ -190,7 +192,26 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; +SET citus.log_remote_commands = false; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, + 
pd.datistemplate, pd.datallowconn, pd.datconnlimit, + pd.datcollate , pd. datctype , pd.datacl, + pa.rolname AS database_owner, pt.spcname AS tablespace + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + join pg_tablespace pt on pd.dattablespace = pt.oid + WHERE datname = 'my_template_database' + ) q2 + $$ +) ORDER BY result; SET citus.log_remote_commands = true; @@ -223,6 +244,16 @@ SELECT result from run_command_on_all_nodes( ) ORDER BY result; +--tests for special characters in database name +set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE DATABASE%'; + +create database "mydatabase#1'2"; + +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database if exists "mydatabase#1'2"; + \c - - - :master_port drop tablespace create_drop_db_tablespace; From 8519e7d7fafdfb278789a8132807d7c9c54d4242 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 16:56:25 +0300 Subject: [PATCH 46/60] Adds a dummy file for pg14 --- .../expected/pg15_create_drop_database_propagation_0.out | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 src/test/regress/expected/pg15_create_drop_database_propagation_0.out diff --git a/src/test/regress/expected/pg15_create_drop_database_propagation_0.out b/src/test/regress/expected/pg15_create_drop_database_propagation_0.out new file mode 100644 index 000000000..b1ed9cc5b --- /dev/null +++ b/src/test/regress/expected/pg15_create_drop_database_propagation_0.out @@ -0,0 +1,9 @@ +-- +-- PG15 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q From 6eeeddd5490bb65cca4d46ba00508c5ae073d498 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 17:11:01 +0300 Subject: [PATCH 47/60] Fixes pg14 tests --- .../regress/expected/create_drop_database_propagation.out | 6 +----- 
src/test/regress/sql/create_drop_database_propagation.sql | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index add4ddac7..ffd85f7c5 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -162,15 +162,11 @@ SELECT result from run_command_on_all_nodes( -- create a template database with all options set and allow connections false CREATE DATABASE my_template_database WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user + OWNER = create_drop_db_test_user ENCODING = 'UTF8' - STRATEGY = 'wal_log' LOCALE = 'en_US.utf8' LC_COLLATE = 'POSIX' LC_CTYPE = 'POSIX' - ICU_LOCALE = 'en-US' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index d7174808e..a81880092 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -140,15 +140,11 @@ SELECT result from run_command_on_all_nodes( -- create a template database with all options set and allow connections false CREATE DATABASE my_template_database WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user + OWNER = create_drop_db_test_user ENCODING = 'UTF8' - STRATEGY = 'wal_log' LOCALE = 'en_US.utf8' LC_COLLATE = 'POSIX' LC_CTYPE = 'POSIX' - ICU_LOCALE = 'en-US' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; From 8c29444bbf0728e153383f202404f445453de9af Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 17:25:56 +0300 Subject: [PATCH 48/60] Fixes pg14 tests --- 
.../create_drop_database_propagation.out | 18 ++++++++---------- .../sql/create_drop_database_propagation.sql | 2 -- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index ffd85f7c5..239048eef 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -165,8 +165,6 @@ CREATE DATABASE my_template_database OWNER = create_drop_db_test_user ENCODING = 'UTF8' LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; @@ -185,11 +183,11 @@ SELECT result from run_command_on_all_nodes( ) q2 $$ ) ORDER BY result; - result + result --------------------------------------------------------------------- - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": 
"create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] (3 rows) SET citus.log_remote_commands = true; @@ -211,11 +209,11 @@ SELECT result from run_command_on_all_nodes( ) q2 $$ ) ORDER BY result; - result + result --------------------------------------------------------------------- - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", 
"encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] (3 rows) SET citus.log_remote_commands = true; diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index a81880092..8e898e420 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -143,8 +143,6 @@ CREATE DATABASE my_template_database OWNER = create_drop_db_test_user ENCODING = 'UTF8' LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; From 2b3a556f3400f5871901f130194e6a37db8b70dd Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 17:40:37 +0300 Subject: [PATCH 49/60] Fixes pg14 tests --- .../create_drop_database_propagation.out | 40 ++++--------------- .../sql/create_drop_database_propagation.sql | 28 ++----------- 2 files changed, 10 insertions(+), 58 deletions(-) diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index 239048eef..fd7173ce2 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -164,7 +164,8 @@ CREATE DATABASE my_template_database WITH TEMPLATE = 'template0' OWNER = create_drop_db_test_user ENCODING = 'UTF8' - LOCALE = 'en_US.utf8' + LC_COLLATE = 'C' + LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace 
ALLOW_CONNECTIONS = false IS_TEMPLATE = true; @@ -183,40 +184,13 @@ SELECT result from run_command_on_all_nodes( ) q2 $$ ) ORDER BY result; - result + result --------------------------------------------------------------------- - [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] + [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] (3 rows) -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database 
my_template_database; -ERROR: cannot drop a template database -SET citus.log_remote_commands = false; -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'my_template_database' - ) q2 - $$ -) ORDER BY result; - result ---------------------------------------------------------------------- - [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] - [{"datacl": null, "datname": "my_template_database", "datctype": "en_US.utf8", "encoding": "UTF8", "datcollate": "en_US.utf8", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] -(3 rows) - -SET citus.log_remote_commands = true; --template databases could not be dropped so we need to change the template flag SELECT result from run_command_on_all_nodes( $$ @@ -230,7 +204,7 @@ SELECT result from run_command_on_all_nodes( UPDATE 1 (3 rows) -; +SET citus.log_remote_commands = true; set citus.grep_remote_commands = '%DROP DATABASE%'; drop database my_template_database; NOTICE: issuing DROP DATABASE my_template_database diff --git 
a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index 8e898e420..540d6a9e6 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -142,7 +142,8 @@ CREATE DATABASE my_template_database WITH TEMPLATE = 'template0' OWNER = create_drop_db_test_user ENCODING = 'UTF8' - LOCALE = 'en_US.utf8' + LC_COLLATE = 'C' + LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; @@ -164,29 +165,6 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database my_template_database; - -SET citus.log_remote_commands = false; - -SELECT result from run_command_on_all_nodes( - $$ - SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( - SELECT pd.datname, pg_encoding_to_char(pd.encoding) as encoding, - pd.datistemplate, pd.datallowconn, pd.datconnlimit, - pd.datcollate , pd. 
datctype , pd.datacl, - pa.rolname AS database_owner, pt.spcname AS tablespace - FROM pg_database pd - JOIN pg_authid pa ON pd.datdba = pa.oid - join pg_tablespace pt on pd.dattablespace = pt.oid - WHERE datname = 'my_template_database' - ) q2 - $$ -) ORDER BY result; - -SET citus.log_remote_commands = true; - --template databases could not be dropped so we need to change the template flag SELECT result from run_command_on_all_nodes( $$ @@ -194,7 +172,7 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; -; +SET citus.log_remote_commands = true; set citus.grep_remote_commands = '%DROP DATABASE%'; drop database my_template_database; From f9218d9780a88356b7f6861bb1cc07781c1df808 Mon Sep 17 00:00:00 2001 From: Benjamin O Date: Fri, 27 Oct 2023 10:42:55 -0400 Subject: [PATCH 50/60] Support replacing IPv6 Loopback in `normalize.sed` (#7269) I had a test failure issue due to my machine using the IPv6 loopback address. This change to the `normalize.sed` solves that issue. --- src/test/regress/bin/normalize.sed | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index efa9e310f..1d293e964 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -222,7 +222,7 @@ s/(CONTEXT: PL\/pgSQL function .* line )([0-9]+)/\1XX/g s/^(PL\/pgSQL function .* line) [0-9]+ (.*)/\1 XX \2/g # normalize a test difference in multi_move_mx -s/ connection to server at "\w+" \(127\.0\.0\.1\), port [0-9]+ failed://g +s/ connection to server at "\w+" (\(127\.0\.0\.1\)|\(::1\)), port [0-9]+ failed://g # normalize differences in tablespace of new index s/pg14\.idx.*/pg14\.xxxxx/g From d0b093c975c8b3056f9e35c81903e7cfa05644d2 Mon Sep 17 00:00:00 2001 From: Nils Dijk Date: Fri, 27 Oct 2023 16:57:51 +0200 Subject: [PATCH 51/60] automatically add a breakpoint that breaks on postgres errors (#7279) When debugging postgres it is quite hard to get to the source for 
`errfinish` in `elog.c`. Instead of relying on the developer to set a breakpoint in the `elog.c` file for `errfinish` for `elevel == ERROR`, this change adds the breakpoint to `.gdbinit`. This makes sure that whenever a debugger is attached to a postgres backend it will break on postgres errors. When attaching the debugger a small banner is printed that explains how to disable the breakpoint. --- .devcontainer/.gdbinit | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/.devcontainer/.gdbinit b/.devcontainer/.gdbinit index 9c710923f..9d544512b 100644 --- a/.devcontainer/.gdbinit +++ b/.devcontainer/.gdbinit @@ -3,3 +3,31 @@ # actually also works when debugging with vscode. Providing nice tools # to understand the internal datastructures we are working with. source /root/gdbpg.py + +# when debugging postgres it is convenient to _always_ have a breakpoint +# trigger when an error is logged. Because .gdbinit is sourced before gdb +# is fully attached and has the sources loaded. To make sure the breakpoint +# is added when the library is loaded we temporarily set the breakpoint pending +# to on. After we have added our breakpoint we revert back to the default +# configuration for breakpoint pending. +# The breakpoint is hard to read, but at entry of the function we don't have +# the level loaded in elevel. Instead we hardcode the location where the +# level of the current error is stored. Also gdb doesn't understand the +# ERROR symbol so we hardcode this to the value of ERROR. It is very unlikely +# this value will ever change in postgres, but if it does we might need to +# find a way to conditionally load the correct breakpoint. 
+set breakpoint pending on +break elog.c:errfinish if errordata[errordata_stack_depth].elevel == 21 +set breakpoint pending auto + +echo \n +echo ----------------------------------------------------------------------------------\n +echo when attaching to a postgres backend a breakpoint will be set on elog.c:errfinish \n +echo it will only break on errors being raised in postgres \n +echo \n +echo to disable this breakpoint from vscode run `-exec disable 1` in the debug console \n +echo this assumes it's the first breakpoint loaded as it is loaded from .gdbinit \n +echo this can be verified with `-exec info break`, enabling can be done with \n +echo `-exec enable 1` \n +echo ----------------------------------------------------------------------------------\n +echo \n From cf7efca5463ad7c9c0cc87ef8b9e04ad7d94346b Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 18:01:07 +0300 Subject: [PATCH 52/60] Fixes single_node for pg14 --- src/test/regress/expected/single_node_0.out | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out index 321d283f8..a44460cca 100644 --- a/src/test/regress/expected/single_node_0.out +++ b/src/test/regress/expected/single_node_0.out @@ -88,8 +88,9 @@ SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); (1 row) CREATE INDEX CONCURRENTLY ON failover_to_local(a); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. - Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then reattempt the original command. 
ERROR: the total number of connections on the server is more than max_connections(100) HINT: Consider using a higher value for max_connections -- reset global GUC changes From 5381aa8fda9a002e51009133d75fd6cbf6f64930 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 18:13:17 +0300 Subject: [PATCH 53/60] Fixes flaky tests --- src/test/regress/citus_tests/run_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index 6528834ae..ace48a8b9 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -135,6 +135,7 @@ DEPS = { "minimal_schedule", ["multi_behavioral_analytics_create_table"] ), "alter_role_propagation": TestDeps("minimal_schedule"), + "background_rebalance": TestDeps( None, [ @@ -151,6 +152,7 @@ DEPS = { ], worker_count=6, ), + "create_drop_database_propagation": TestDeps("minimal_schedule"), "function_propagation": TestDeps("minimal_schedule"), "citus_shards": TestDeps("minimal_schedule"), "grant_on_foreign_server_propagation": TestDeps("minimal_schedule"), From dcae4c3486b2137aa6b764958020e8d96e77f808 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 18:17:58 +0300 Subject: [PATCH 54/60] Fixes python code format --- src/test/regress/citus_tests/run_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index ace48a8b9..946676e69 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -135,7 +135,6 @@ DEPS = { "minimal_schedule", ["multi_behavioral_analytics_create_table"] ), "alter_role_propagation": TestDeps("minimal_schedule"), - "background_rebalance": TestDeps( None, [ From 89e2d63bd39b0224c2f24721f908aa0c9a0e769f Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 18:37:22 +0300 Subject: [PATCH 55/60] Adds pg15 create drop to minimal tests --- 
src/test/regress/citus_tests/run_test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index 946676e69..70844c702 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -152,6 +152,7 @@ DEPS = { worker_count=6, ), "create_drop_database_propagation": TestDeps("minimal_schedule"), + "pg_15_create_drop_database_propagation": TestDeps("minimal_schedule"), "function_propagation": TestDeps("minimal_schedule"), "citus_shards": TestDeps("minimal_schedule"), "grant_on_foreign_server_propagation": TestDeps("minimal_schedule"), From fe136886aaddc4262874530119e111c602aec619 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 18:43:51 +0300 Subject: [PATCH 56/60] Changes pg15 test files --- src/test/regress/citus_tests/run_test.py | 2 +- ...ropagation.out => create_drop_database_propagation_pg15.out} | 0 ...gation_0.out => create_drop_database_propagation_pg15_0.out} | 0 src/test/regress/multi_1_schedule | 2 +- ...ropagation.sql => create_drop_database_propagation_pg15.sql} | 0 5 files changed, 2 insertions(+), 2 deletions(-) rename src/test/regress/expected/{pg15_create_drop_database_propagation.out => create_drop_database_propagation_pg15.out} (100%) rename src/test/regress/expected/{pg15_create_drop_database_propagation_0.out => create_drop_database_propagation_pg15_0.out} (100%) rename src/test/regress/sql/{pg15_create_drop_database_propagation.sql => create_drop_database_propagation_pg15.sql} (100%) diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index 70844c702..30f6b3b37 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -152,7 +152,7 @@ DEPS = { worker_count=6, ), "create_drop_database_propagation": TestDeps("minimal_schedule"), - "pg_15_create_drop_database_propagation": TestDeps("minimal_schedule"), + 
"create_drop_database_propagation_pg_15": TestDeps("minimal_schedule"), "function_propagation": TestDeps("minimal_schedule"), "citus_shards": TestDeps("minimal_schedule"), "grant_on_foreign_server_propagation": TestDeps("minimal_schedule"), diff --git a/src/test/regress/expected/pg15_create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out similarity index 100% rename from src/test/regress/expected/pg15_create_drop_database_propagation.out rename to src/test/regress/expected/create_drop_database_propagation_pg15.out diff --git a/src/test/regress/expected/pg15_create_drop_database_propagation_0.out b/src/test/regress/expected/create_drop_database_propagation_pg15_0.out similarity index 100% rename from src/test/regress/expected/pg15_create_drop_database_propagation_0.out rename to src/test/regress/expected/create_drop_database_propagation_pg15_0.out diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 766a1bc73..3ad0eabfc 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -34,7 +34,7 @@ test: alter_database_owner test: distributed_triggers test: create_single_shard_table test: create_drop_database_propagation -test: pg15_create_drop_database_propagation +test: create_drop_database_propagation_pg15 # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs diff --git a/src/test/regress/sql/pg15_create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql similarity index 100% rename from src/test/regress/sql/pg15_create_drop_database_propagation.sql rename to src/test/regress/sql/create_drop_database_propagation_pg15.sql From 693eeedfaf0f7b8189174a9bf95b96532b56ed59 Mon Sep 17 00:00:00 2001 From: gindibay Date: Fri, 27 Oct 2023 19:08:09 +0300 Subject: [PATCH 57/60] Fixes pg15 test name --- src/test/regress/citus_tests/run_test.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index 30f6b3b37..90c16b04e 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -152,7 +152,7 @@ DEPS = { worker_count=6, ), "create_drop_database_propagation": TestDeps("minimal_schedule"), - "create_drop_database_propagation_pg_15": TestDeps("minimal_schedule"), + "create_drop_database_propagation_pg15": TestDeps("minimal_schedule"), "function_propagation": TestDeps("minimal_schedule"), "citus_shards": TestDeps("minimal_schedule"), "grant_on_foreign_server_propagation": TestDeps("minimal_schedule"), From ee8f4bb7e851b210b72ceb0d2d952890de14a3e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emel=20=C5=9Eim=C5=9Fek?= Date: Mon, 30 Oct 2023 09:44:13 +0300 Subject: [PATCH 58/60] Start Maintenance Daemon for Main DB at the server start. (#7254) DESCRIPTION: This change starts a maintenance daemon at the time of server start if there is a designated main database. This is the code flow: 1. User designates a main database: `ALTER SYSTEM SET citus.main_db = "myadmindb";` 2. When postmaster starts, in _PG_Init, citus calls `InitializeMaintenanceDaemonForMainDb` This function registers a background worker to run `CitusMaintenanceDaemonMain `with `databaseOid = 0 ` 3. `CitusMaintenanceDaemonMain ` takes some special actions when databaseOid is 0: - Gets the citus.main_db value. - Connects to the citus.main_db - Now the `MyDatabaseId `is available, creates a hash entry for it. 
- Then follows the same control flow as for a regular db, --- src/backend/distributed/shared_library_init.c | 11 + src/backend/distributed/utils/maintenanced.c | 283 +++++++++++++----- src/include/distributed/maintenanced.h | 2 + src/test/regress/citus_tests/common.py | 14 + .../test/test_maintenancedeamon.py | 74 +++++ 5 files changed, 307 insertions(+), 77 deletions(-) create mode 100644 src/test/regress/citus_tests/test/test_maintenancedeamon.py diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 1ac20c8bc..9b5768ee7 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -481,6 +481,7 @@ _PG_init(void) #endif InitializeMaintenanceDaemon(); + InitializeMaintenanceDaemonForMainDb(); /* initialize coordinated transaction management */ InitializeTransactionManagement(); @@ -1820,6 +1821,16 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_UNIT_MS, NULL, NULL, NULL); + DefineCustomStringVariable( + "citus.main_db", + gettext_noop("Which database is designated as the main_db"), + NULL, + &MainDb, + "", + PGC_POSTMASTER, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomIntVariable( "citus.max_adaptive_executor_pool_size", gettext_noop("Sets the maximum number of connections per worker node used by " diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 5f49de20a..22a0843bd 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -99,6 +99,7 @@ int Recover2PCInterval = 60000; int DeferShardDeleteInterval = 15000; int BackgroundTaskQueueCheckInterval = 5000; int MaxBackgroundTaskExecutors = 4; +char *MainDb = ""; /* config variables for metadata sync timeout */ int MetadataSyncInterval = 60000; @@ -112,7 +113,7 @@ static MaintenanceDaemonControlData *MaintenanceDaemonControl = NULL; * activated. 
*/ static HTAB *MaintenanceDaemonDBHash; - +static ErrorContextCallback errorCallback = { 0 }; static volatile sig_atomic_t got_SIGHUP = false; static volatile sig_atomic_t got_SIGTERM = false; @@ -125,6 +126,8 @@ static void MaintenanceDaemonShmemExit(int code, Datum arg); static void MaintenanceDaemonErrorContext(void *arg); static bool MetadataSyncTriggeredCheckAndReset(MaintenanceDaemonDBData *dbData); static void WarnMaintenanceDaemonNotStarted(void); +static MaintenanceDaemonDBData * GetMaintenanceDaemonDBHashEntry(Oid databaseId, + bool *found); /* * InitializeMaintenanceDaemon, called at server start, is responsible for @@ -139,6 +142,82 @@ InitializeMaintenanceDaemon(void) } +/* + * GetMaintenanceDaemonDBHashEntry searches the MaintenanceDaemonDBHash for the + * databaseId. It returns the entry if found or creates a new entry and initializes + * the value with zeroes. + */ +MaintenanceDaemonDBData * +GetMaintenanceDaemonDBHashEntry(Oid databaseId, bool *found) +{ + MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search( + MaintenanceDaemonDBHash, + &MyDatabaseId, + HASH_ENTER_NULL, + found); + + if (!dbData) + { + elog(LOG, + "cannot create or find the maintenance deamon hash entry for database %u", + databaseId); + return NULL; + } + + if (!*found) + { + /* ensure the values in MaintenanceDaemonDBData are zero */ + memset(((char *) dbData) + sizeof(Oid), 0, + sizeof(MaintenanceDaemonDBData) - sizeof(Oid)); + } + + return dbData; +} + + +/* + * InitializeMaintenanceDaemonForMainDb is called in _PG_Init + * at which stage we are not in a transaction or have databaseOid + */ +void +InitializeMaintenanceDaemonForMainDb(void) +{ + if (strcmp(MainDb, "") == 0) + { + elog(LOG, "There is no designated Main database."); + return; + } + + BackgroundWorker worker; + + memset(&worker, 0, sizeof(worker)); + + + strcpy_s(worker.bgw_name, sizeof(worker.bgw_name), + "Citus Maintenance Daemon for Main DB"); + + /* request ability to connect to target 
database */ + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; + + /* + * No point in getting started before able to run query, but we do + * want to get started on Hot-Standby. + */ + worker.bgw_start_time = BgWorkerStart_ConsistentState; + + /* Restart after a bit after errors, but don't bog the system. */ + worker.bgw_restart_time = 5; + strcpy_s(worker.bgw_library_name, + sizeof(worker.bgw_library_name), "citus"); + strcpy_s(worker.bgw_function_name, sizeof(worker.bgw_library_name), + "CitusMaintenanceDaemonMain"); + + worker.bgw_main_arg = (Datum) 0; + + RegisterBackgroundWorker(&worker); +} + + /* * InitializeMaintenanceDaemonBackend, called at backend start and * configuration changes, is responsible for starting a per-database @@ -148,31 +227,20 @@ void InitializeMaintenanceDaemonBackend(void) { Oid extensionOwner = CitusExtensionOwner(); - bool found; + bool found = false; LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search( - MaintenanceDaemonDBHash, - &MyDatabaseId, - HASH_ENTER_NULL, - &found); + MaintenanceDaemonDBData *dbData = GetMaintenanceDaemonDBHashEntry(MyDatabaseId, + &found); if (dbData == NULL) { WarnMaintenanceDaemonNotStarted(); LWLockRelease(&MaintenanceDaemonControl->lock); - return; } - if (!found) - { - /* ensure the values in MaintenanceDaemonDBData are zero */ - memset(((char *) dbData) + sizeof(Oid), 0, - sizeof(MaintenanceDaemonDBData) - sizeof(Oid)); - } - if (IsMaintenanceDaemon) { /* @@ -271,66 +339,97 @@ WarnMaintenanceDaemonNotStarted(void) /* - * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll - * be started by the background worker infrastructure. If it errors out, - * it'll be restarted after a few seconds. + * ConnectToDatabase connects to the database for the given databaseOid. + * if databaseOid is 0, connects to MainDb and then creates a hash entry. 
+ * If a hash entry cannot be created for MainDb it exits the process requesting a restart. + * However for regular databases, it exits without requesting a restart since another + * subsequent backend is expected to start the Maintenance Daemon. + * If the found hash entry has a valid workerPid, it exits + * without requesting a restart since there is already a daemon running. */ -void -CitusMaintenanceDaemonMain(Datum main_arg) +static MaintenanceDaemonDBData * +ConnectToDatabase(Oid databaseOid) { - Oid databaseOid = DatumGetObjectId(main_arg); - TimestampTz nextStatsCollectionTime USED_WITH_LIBCURL_ONLY = - TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 60 * 1000); - bool retryStatsCollection USED_WITH_LIBCURL_ONLY = false; - TimestampTz lastRecoveryTime = 0; - TimestampTz lastShardCleanTime = 0; - TimestampTz lastStatStatementsPurgeTime = 0; - TimestampTz nextMetadataSyncTime = 0; + MaintenanceDaemonDBData *myDbData = NULL; - /* state kept for the background tasks queue monitor */ - TimestampTz lastBackgroundTaskQueueCheck = GetCurrentTimestamp(); - BackgroundWorkerHandle *backgroundTasksQueueBgwHandle = NULL; - bool backgroundTasksQueueWarnedForLock = false; - /* - * We do metadata sync in a separate background worker. We need its - * handle to be able to check its status. - */ - BackgroundWorkerHandle *metadataSyncBgwHandle = NULL; + bool isMainDb = false; - /* - * Look up this worker's configuration. - */ LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - MaintenanceDaemonDBData *myDbData = (MaintenanceDaemonDBData *) - hash_search(MaintenanceDaemonDBHash, &databaseOid, - HASH_FIND, NULL); - if (!myDbData) - { - /* - * When the database crashes, background workers are restarted, but - * the state in shared memory is lost. In that case, we exit and - * wait for a session to call InitializeMaintenanceDaemonBackend - * to properly add it to the hash. 
- */ - proc_exit(0); + if (databaseOid == 0) + { + char *databaseName = MainDb; + + /* + * Since we cannot query databaseOid without initializing Postgres + * first, connect to the database by name. + */ + BackgroundWorkerInitializeConnection(databaseName, NULL, 0); + + /* + * Now we have a valid MyDatabaseId. + * Insert the hash entry for the database to the Maintenance Deamon Hash. + */ + bool found = false; + + myDbData = GetMaintenanceDaemonDBHashEntry(MyDatabaseId, &found); + + if (!myDbData) + { + /* + * If an entry cannot be created, + * return code of 1 requests worker restart + * Since BackgroundWorker for the MainDb is only registered + * once during server startup, we need to retry. + */ + proc_exit(1); + } + + if (found && myDbData->workerPid != 0) + { + /* Another maintenance daemon is running.*/ + + proc_exit(0); + } + + databaseOid = MyDatabaseId; + myDbData->userOid = GetSessionUserId(); + isMainDb = true; + } + else + { + myDbData = (MaintenanceDaemonDBData *) + hash_search(MaintenanceDaemonDBHash, &databaseOid, + HASH_FIND, NULL); + + if (!myDbData) + { + /* + * When the database crashes, background workers are restarted, but + * the state in shared memory is lost. In that case, we exit and + * wait for a session to call InitializeMaintenanceDaemonBackend + * to properly add it to the hash. + */ + + proc_exit(0); + } + + if (myDbData->workerPid != 0) + { + /* + * Another maintenance daemon is running. This usually happens because + * postgres restarts the daemon after an non-zero exit, and + * InitializeMaintenanceDaemonBackend started one before postgres did. + * In that case, the first one stays and the last one exits. + */ + + proc_exit(0); + } } - if (myDbData->workerPid != 0) - { - /* - * Another maintenance daemon is running. This usually happens because - * postgres restarts the daemon after an non-zero exit, and - * InitializeMaintenanceDaemonBackend started one before postgres did. 
- * In that case, the first one stays and the last one exits. - */ - - proc_exit(0); - } - - before_shmem_exit(MaintenanceDaemonShmemExit, main_arg); + before_shmem_exit(MaintenanceDaemonShmemExit, ObjectIdGetDatum(databaseOid)); /* * Signal that I am the maintenance daemon now. @@ -356,25 +455,55 @@ CitusMaintenanceDaemonMain(Datum main_arg) LWLockRelease(&MaintenanceDaemonControl->lock); - /* - * Setup error context so log messages can be properly attributed. Some of - * them otherwise sound like they might be from a normal user connection. - * Do so before setting up signals etc, so we never exit without the - * context setup. - */ - ErrorContextCallback errorCallback = { 0 }; memset(&errorCallback, 0, sizeof(errorCallback)); errorCallback.callback = MaintenanceDaemonErrorContext; errorCallback.arg = (void *) myDbData; errorCallback.previous = error_context_stack; error_context_stack = &errorCallback; - elog(LOG, "starting maintenance daemon on database %u user %u", databaseOid, myDbData->userOid); - /* connect to database, after that we can actually access catalogs */ - BackgroundWorkerInitializeConnectionByOid(databaseOid, myDbData->userOid, 0); + if (!isMainDb) + { + /* connect to database, after that we can actually access catalogs */ + BackgroundWorkerInitializeConnectionByOid(databaseOid, myDbData->userOid, 0); + } + + return myDbData; +} + + +/* + * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll + * be started by the background worker infrastructure. If it errors out, + * it'll be restarted after a few seconds. 
+ */ +void +CitusMaintenanceDaemonMain(Datum main_arg) +{ + Oid databaseOid = DatumGetObjectId(main_arg); + TimestampTz nextStatsCollectionTime USED_WITH_LIBCURL_ONLY = + TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 60 * 1000); + bool retryStatsCollection USED_WITH_LIBCURL_ONLY = false; + TimestampTz lastRecoveryTime = 0; + TimestampTz lastShardCleanTime = 0; + TimestampTz lastStatStatementsPurgeTime = 0; + TimestampTz nextMetadataSyncTime = 0; + + /* state kept for the background tasks queue monitor */ + TimestampTz lastBackgroundTaskQueueCheck = GetCurrentTimestamp(); + BackgroundWorkerHandle *backgroundTasksQueueBgwHandle = NULL; + bool backgroundTasksQueueWarnedForLock = false; + + + /* + * We do metadata sync in a separate background worker. We need its + * handle to be able to check its status. + */ + BackgroundWorkerHandle *metadataSyncBgwHandle = NULL; + + MaintenanceDaemonDBData *myDbData = ConnectToDatabase(databaseOid); /* make worker recognizable in pg_stat_activity */ pgstat_report_appname("Citus Maintenance Daemon"); @@ -383,7 +512,7 @@ CitusMaintenanceDaemonMain(Datum main_arg) * Terminate orphaned metadata sync daemons spawned from previously terminated * or crashed maintenanced instances. 
*/ - SignalMetadataSyncDaemon(databaseOid, SIGTERM); + SignalMetadataSyncDaemon(MyDatabaseId, SIGTERM); /* enter main loop */ while (!got_SIGTERM) @@ -945,7 +1074,7 @@ MaintenanceDaemonShmemExit(int code, Datum arg) } -/* MaintenanceDaemonSigTermHandler calls proc_exit(0) */ +/* MaintenanceDaemonSigTermHandler sets the got_SIGTERM flag.*/ static void MaintenanceDaemonSigTermHandler(SIGNAL_ARGS) { diff --git a/src/include/distributed/maintenanced.h b/src/include/distributed/maintenanced.h index de1e68883..07387a7fd 100644 --- a/src/include/distributed/maintenanced.h +++ b/src/include/distributed/maintenanced.h @@ -20,6 +20,7 @@ /* config variable for */ extern double DistributedDeadlockDetectionTimeoutFactor; +extern char *MainDb; extern void StopMaintenanceDaemon(Oid databaseId); extern void TriggerNodeMetadataSync(Oid databaseId); @@ -27,6 +28,7 @@ extern void InitializeMaintenanceDaemon(void); extern size_t MaintenanceDaemonShmemSize(void); extern void MaintenanceDaemonShmemInit(void); extern void InitializeMaintenanceDaemonBackend(void); +extern void InitializeMaintenanceDaemonForMainDb(void); extern bool LockCitusExtension(void); extern PGDLLEXPORT void CitusMaintenanceDaemonMain(Datum main_arg); diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 907102482..53c9c7944 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -453,6 +453,9 @@ def cleanup_test_leftovers(nodes): for node in nodes: node.cleanup_schemas() + for node in nodes: + node.cleanup_databases() + for node in nodes: node.cleanup_users() @@ -753,6 +756,7 @@ class Postgres(QueryRunner): self.subscriptions = set() self.publications = set() self.replication_slots = set() + self.databases = set() self.schemas = set() self.users = set() @@ -993,6 +997,10 @@ class Postgres(QueryRunner): args = sql.SQL("") self.sql(sql.SQL("CREATE USER {} {}").format(sql.Identifier(name), args)) + def create_database(self, name): 
+ self.databases.add(name) + self.sql(sql.SQL("CREATE DATABASE {}").format(sql.Identifier(name))) + def create_schema(self, name): self.schemas.add(name) self.sql(sql.SQL("CREATE SCHEMA {}").format(sql.Identifier(name))) @@ -1020,6 +1028,12 @@ class Postgres(QueryRunner): for user in self.users: self.sql(sql.SQL("DROP USER IF EXISTS {}").format(sql.Identifier(user))) + def cleanup_databases(self): + for database in self.databases: + self.sql( + sql.SQL("DROP DATABASE IF EXISTS {}").format(sql.Identifier(database)) + ) + def cleanup_schemas(self): for schema in self.schemas: self.sql( diff --git a/src/test/regress/citus_tests/test/test_maintenancedeamon.py b/src/test/regress/citus_tests/test/test_maintenancedeamon.py new file mode 100644 index 000000000..3f6cb501e --- /dev/null +++ b/src/test/regress/citus_tests/test/test_maintenancedeamon.py @@ -0,0 +1,74 @@ +# This test checks that once citus.main_db is set and the +# server is restarted. A Citus Maintenance Daemon for the main_db +# is launched. This should happen even if there is no query run +# in main_db yet. +import time + + +def wait_until_maintenance_deamons_start(deamoncount, cluster): + i = 0 + n = 0 + + while i < 10: + i += 1 + n = cluster.coordinator.sql_value( + "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';" + ) + + if n == deamoncount: + break + + time.sleep(0.1) + + assert n == deamoncount + + +def test_set_maindb(cluster_factory): + cluster = cluster_factory(0) + + # Test that once citus.main_db is set to a database name + # there are two maintenance deamons running upon restart. + # One maintenance deamon for the database of the current connection + # and one for the citus.main_db. 
+ cluster.coordinator.create_database("mymaindb") + cluster.coordinator.configure("citus.main_db='mymaindb'") + cluster.coordinator.restart() + + assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb" + + wait_until_maintenance_deamons_start(2, cluster) + + assert ( + cluster.coordinator.sql_value( + "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';" + ) + == 1 + ) + + # Test that once citus.main_db is set to empty string + # there is only one maintenance deamon for the database + # of the current connection. + cluster.coordinator.configure("citus.main_db=''") + cluster.coordinator.restart() + assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "" + + wait_until_maintenance_deamons_start(1, cluster) + + # Test that after citus.main_db is dropped. The maintenance + # deamon for this database is terminated. + cluster.coordinator.configure("citus.main_db='mymaindb'") + cluster.coordinator.restart() + assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb" + + wait_until_maintenance_deamons_start(2, cluster) + + cluster.coordinator.sql("DROP DATABASE mymaindb;") + + wait_until_maintenance_deamons_start(1, cluster) + + assert ( + cluster.coordinator.sql_value( + "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';" + ) + == 0 + ) From d8639d58de527f9a14abf768ab945c9fe2e07b46 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 31 Oct 2023 10:58:44 +0300 Subject: [PATCH 59/60] Adds locale restrictions --- src/backend/distributed/commands/database.c | 17 +++++++ .../commands/distribute_object_ops.c | 2 +- .../deparser/deparse_database_stmts.c | 15 +++++++ src/include/distributed/commands.h | 2 + .../create_drop_database_propagation.out | 40 ++++++++++++----- .../create_drop_database_propagation_pg15.out | 38 +++------------- .../sql/create_drop_database_propagation.sql | 44 ++++++++++++++----- 
.../create_drop_database_propagation_pg15.sql | 27 +----------- 8 files changed, 106 insertions(+), 79 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 42ef2ed43..4867c55c2 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -263,6 +263,23 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } +List * +PreprocessCreateDatabaseStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!EnableCreateDatabasePropagation || !ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + //Validate the statement + DeparseTreeNode(node); + + return NIL; +} + /* * PostprocessCreatedbStmt is executed after the statement is applied to the local diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 2888bef1d..37ad4c5a4 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -469,7 +469,7 @@ static DistributeObjectOps Database_Alter = { static DistributeObjectOps Database_Create = { .deparse = DeparseCreateDatabaseStmt, .qualify = NULL, - .preprocess = NULL, + .preprocess = PreprocessCreateDatabaseStmt, .postprocess = PostprocessCreateDatabaseStmt, .objectType = OBJECT_DATABASE, .operationType = DIST_OPS_CREATE, diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index bf98b9622..169ca40e8 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -237,6 +237,21 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) foreach_ptr(option, stmt->options) { + //If option is template, lc_type, locale or 
lc_collate, propagation will not be supportted + // since template database is not stored in the catalog + if (strcmp(option->defname, "template") == 0 || + strcmp(option->defname, "strategy") == 0 || + strcmp(option->defname, "lc_ctype") == 0 || + strcmp(option->defname, "locale") == 0 || + strcmp(option->defname, "lc_collate") == 0 || + strcmp(option->defname, "icu_locale") == 0 || + strcmp(option->defname, "locale_provider") == 0 ) + { + ereport(ERROR, + errmsg("CREATE DATABASE option \"%s\" is not supported", + option->defname)); + } + optionToStatement(buf, option, create_database_option_formats, lengthof( create_database_option_formats)); } diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index a4f890f9e..28828075b 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -237,6 +237,8 @@ extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missing_ok, bool extern List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessCreateDatabaseStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString); extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index fd7173ce2..9489664eb 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -14,11 +14,8 @@ create user create_drop_db_test_user; set citus.enable_create_database_propagation=on; CREATE DATABASE mydatabase WITH OWNER = create_drop_db_test_user - TEMPLATE = 'template0' ENCODING = 'UTF8' CONNECTION LIMIT = 10 - LC_COLLATE = 'C' - LC_CTYPE = 
'C' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = true IS_TEMPLATE = false; @@ -74,12 +71,9 @@ select 1 from citus_remove_node('localhost', :worker_2_port); --test with is_template true and allow connections false CREATE DATABASE mydatabase - WITH TEMPLATE = 'template0' OWNER = create_drop_db_test_user CONNECTION LIMIT = 10 ENCODING = 'UTF8' - LC_COLLATE = 'C' - LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = false; @@ -161,11 +155,8 @@ SELECT result from run_command_on_all_nodes( -- create a template database with all options set and allow connections false CREATE DATABASE my_template_database - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user + WITH OWNER = create_drop_db_test_user ENCODING = 'UTF8' - LC_COLLATE = 'C' - LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; @@ -248,6 +239,35 @@ NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +--test for unsupported options +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LC_CTYPE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; +ERROR: CREATE DATABASE option "lc_ctype" is not supported +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LC_CTYPE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; +ERROR: CREATE DATABASE option "lc_ctype" is not supported +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LC_COLLATE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; +ERROR: CREATE DATABASE option "lc_collate" is not supported +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LOCALE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; +ERROR: CREATE 
DATABASE option "locale" is not supported --clean up resources created by this test drop tablespace create_drop_db_tablespace; \c - - - :worker_1_port diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out index e1073f980..bc6374803 100644 --- a/src/test/regress/expected/create_drop_database_propagation_pg15.out +++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out @@ -23,24 +23,17 @@ set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; set citus.grep_remote_commands = '%CREATE DATABASE%'; CREATE DATABASE mydatabase - WITH TEMPLATE = 'template0' + WITH OWNER = create_drop_db_test_user CONNECTION LIMIT = 10 ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = '' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'und' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = true IS_TEMPLATE = false OID = 966345; -NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +NOTICE: issuing CREATE DATABASE mydatabase OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE DATABASE mydatabase TEMPLATE template0 OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE '' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'und' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 
+NOTICE: issuing CREATE DATABASE mydatabase OWNER create_drop_db_test_user CONNECTION LIMIT 10 ENCODING 'UTF8' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( @@ -104,16 +97,11 @@ set citus.grep_remote_commands = '%CREATE DATABASE%'; CREATE DATABASE mydatabase2 WITH OWNER = create_drop_db_test_user ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = true IS_TEMPLATE = false OID = 966345; -NOTICE: issuing CREATE DATABASE mydatabase2 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 +NOTICE: issuing CREATE DATABASE mydatabase2 OWNER create_drop_db_test_user ENCODING 'UTF8' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS true IS_TEMPLATE false OID 966345 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( @@ -196,22 +184,15 @@ SET citus.log_remote_commands = true; set citus.grep_remote_commands = '%CREATE DATABASE%'; -- create a template database with all options set and allow connections false CREATE DATABASE my_template_database - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user + WITH OWNER = create_drop_db_test_user ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'en-US' - LOCALE_PROVIDER = 'icu' COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; -NOTICE: issuing CREATE DATABASE my_template_database 
TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true +NOTICE: issuing CREATE DATABASE my_template_database OWNER create_drop_db_test_user ENCODING 'UTF8' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing CREATE DATABASE my_template_database TEMPLATE template0 OWNER create_drop_db_test_user ENCODING 'UTF8' STRATEGY 'wal_log' LOCALE 'en_US.utf8' LC_COLLATE 'POSIX' LC_CTYPE 'POSIX' ICU_LOCALE 'en-US' LOCALE_PROVIDER 'icu' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true +NOTICE: issuing CREATE DATABASE my_template_database OWNER create_drop_db_test_user ENCODING 'UTF8' COLLATION_VERSION '1.0' TABLESPACE create_drop_db_tablespace ALLOW_CONNECTIONS false IS_TEMPLATE true DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( @@ -235,10 +216,6 @@ SELECT result from run_command_on_all_nodes( [{"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "datallowconn": false, "datconnlimit": -1, "datistemplate": true, "database_owner": "create_drop_db_test_user"}] (3 rows) -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database my_template_database; -ERROR: cannot drop a template database SET citus.log_remote_commands = false; SELECT result from run_command_on_all_nodes( $$ @@ -275,7 +252,6 @@ SELECT result from run_command_on_all_nodes( UPDATE 1 (3 rows) -; set citus.grep_remote_commands = '%DROP DATABASE%'; drop database 
my_template_database; NOTICE: issuing DROP DATABASE my_template_database diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index 540d6a9e6..ae90088d1 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -21,17 +21,12 @@ set citus.enable_create_database_propagation=on; CREATE DATABASE mydatabase WITH OWNER = create_drop_db_test_user - TEMPLATE = 'template0' ENCODING = 'UTF8' CONNECTION LIMIT = 10 - LC_COLLATE = 'C' - LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = true IS_TEMPLATE = false; - - SELECT result from run_command_on_all_nodes( $$ SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( @@ -73,12 +68,9 @@ select 1 from citus_remove_node('localhost', :worker_2_port); --test with is_template true and allow connections false CREATE DATABASE mydatabase - WITH TEMPLATE = 'template0' OWNER = create_drop_db_test_user CONNECTION LIMIT = 10 ENCODING = 'UTF8' - LC_COLLATE = 'C' - LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = false; @@ -139,11 +131,8 @@ SELECT result from run_command_on_all_nodes( -- create a template database with all options set and allow connections false CREATE DATABASE my_template_database - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user + WITH OWNER = create_drop_db_test_user ENCODING = 'UTF8' - LC_COLLATE = 'C' - LC_CTYPE = 'C' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false IS_TEMPLATE = true; @@ -203,6 +192,37 @@ create database "mydatabase#1'2"; set citus.grep_remote_commands = '%DROP DATABASE%'; drop database if exists "mydatabase#1'2"; +--test for unsupported options + +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LC_CTYPE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; + +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 
'UTF8' + LC_CTYPE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; + +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LC_COLLATE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; + +CREATE DATABASE mydatabase + with CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + LOCALE = 'C.UTF-8' + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; + + --clean up resources created by this test drop tablespace create_drop_db_tablespace; diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql index 3a8e80ebf..ca3e3b202 100644 --- a/src/test/regress/sql/create_drop_database_propagation_pg15.sql +++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql @@ -29,17 +29,10 @@ set citus.enable_create_database_propagation=on; SET citus.log_remote_commands = true; set citus.grep_remote_commands = '%CREATE DATABASE%'; CREATE DATABASE mydatabase - WITH TEMPLATE = 'template0' + WITH OWNER = create_drop_db_test_user CONNECTION LIMIT = 10 ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = '' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'und' - LOCALE_PROVIDER = 'icu' - COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = true IS_TEMPLATE = false @@ -91,11 +84,6 @@ set citus.grep_remote_commands = '%CREATE DATABASE%'; CREATE DATABASE mydatabase2 WITH OWNER = create_drop_db_test_user ENCODING = 'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = true IS_TEMPLATE = false @@ -161,15 +149,8 @@ set citus.grep_remote_commands = '%CREATE DATABASE%'; -- create a template database with all options set and allow connections false CREATE DATABASE my_template_database - WITH TEMPLATE = 'template0' - OWNER = create_drop_db_test_user + WITH OWNER = create_drop_db_test_user ENCODING = 
'UTF8' - STRATEGY = 'wal_log' - LOCALE = 'en_US.utf8' - LC_COLLATE = 'POSIX' - LC_CTYPE = 'POSIX' - ICU_LOCALE = 'en-US' - LOCALE_PROVIDER = 'icu' COLLATION_VERSION = '1.0' TABLESPACE = create_drop_db_tablespace ALLOW_CONNECTIONS = false @@ -192,9 +173,6 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; -SET citus.log_remote_commands = true; -set citus.grep_remote_commands = '%DROP DATABASE%'; -drop database my_template_database; SET citus.log_remote_commands = false; @@ -222,7 +200,6 @@ SELECT result from run_command_on_all_nodes( $$ ) ORDER BY result; -; set citus.grep_remote_commands = '%DROP DATABASE%'; drop database my_template_database; From 2d9181a9ab332a34d117712fcf199401fb3619e0 Mon Sep 17 00:00:00 2001 From: gindibay Date: Tue, 31 Oct 2023 11:00:40 +0300 Subject: [PATCH 60/60] Fixes indents --- src/backend/distributed/commands/database.c | 5 +++-- src/backend/distributed/deparser/deparse_database_stmts.c | 6 +++--- src/include/distributed/commands.h | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 4867c55c2..22de2d4bd 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -263,9 +263,10 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } + List * PreprocessCreateDatabaseStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext) + ProcessUtilityContext processUtilityContext) { if (!EnableCreateDatabasePropagation || !ShouldPropagate()) { @@ -274,7 +275,7 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString, EnsureCoordinator(); - //Validate the statement + /*Validate the statement */ DeparseTreeNode(node); return NIL; diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c 
index 169ca40e8..e286dae65 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -237,15 +237,15 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) foreach_ptr(option, stmt->options) { - //If option is template, lc_type, locale or lc_collate, propagation will not be supportted - // since template database is not stored in the catalog + /*If option is template, lc_type, locale or lc_collate, propagation will not be supportted */ + /* since template database is not stored in the catalog */ if (strcmp(option->defname, "template") == 0 || strcmp(option->defname, "strategy") == 0 || strcmp(option->defname, "lc_ctype") == 0 || strcmp(option->defname, "locale") == 0 || strcmp(option->defname, "lc_collate") == 0 || strcmp(option->defname, "icu_locale") == 0 || - strcmp(option->defname, "locale_provider") == 0 ) + strcmp(option->defname, "locale_provider") == 0) { ereport(ERROR, errmsg("CREATE DATABASE option \"%s\" is not supported", diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 28828075b..0e43fa386 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -238,7 +238,7 @@ extern List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString ProcessUtilityContext processUtilityContext); extern List * PreprocessCreateDatabaseStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); + ProcessUtilityContext processUtilityContext); extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString); extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext);