From 66385ab577ecda7d79fc080f68803b73f4d5718f Mon Sep 17 00:00:00 2001 From: Roma Novikov Date: Tue, 8 Jun 2021 18:19:38 +0300 Subject: [PATCH 01/45] Update USER_GUIDE.md typo fix --- docs/USER_GUIDE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index 8cdd750..1e97b61 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -91,7 +91,7 @@ pg_stat_monitor extension contains a view called pg_stat_monitor, which contains  message             | text                     | :heavy_check_mark:  | :x:  plans               | bigint                   | :heavy_check_mark:  | :heavy_check_mark:  plan_total_time     | double precision         | :heavy_check_mark:  | :heavy_check_mark: - plan_min_timei      | double precision         | :heavy_check_mark:  | :heavy_check_mark: + plan_min_time      | double precision         | :heavy_check_mark:  | :heavy_check_mark:  plan_max_time       | double precision         | :heavy_check_mark:  | :heavy_check_mark:  plan_mean_time      | double precision         | :heavy_check_mark:  | :heavy_check_mark:  plan_stddev_time    | double precision         | :heavy_check_mark:  | :heavy_check_mark: From 33b22e4ef2d1906c8ee2fd35eb5444cb19d53527 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Fri, 6 Aug 2021 15:35:14 -0400 Subject: [PATCH 02/45] PG-203: Fix memory leak. The query_txt variable is allocated at the beginning of the pg_stat_monitor_internal() function and released at the end, but an extra malloc call to allocate it was added within an internal loop in the funcion, thus allocating memory for every loop iteration, without releasing the memory in the loop. The query_txt variable can be reused inside the loop body, so this commit removes the redundant declaration of query_txt from inside the loop, which also fixes the leak. 
--- pg_stat_monitor.c | 1 - 1 file changed, 1 deletion(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 6f651a9..c8dc638 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1546,7 +1546,6 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, uint64 planid = entry->key.planid; uint64 appid = entry->key.appid; unsigned char *buf = pgss_qbuf[bucketid]; - char *query_txt = (char*) malloc(PGSM_QUERY_MAX_LEN); #if PG_VERSION_NUM < 140000 bool is_allowed_role = is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS); #else From 2e31738f15fae670f073c1ffad9de71bd4e7661b Mon Sep 17 00:00:00 2001 From: Naeem Akhter Date: Tue, 17 Aug 2021 18:23:15 +0500 Subject: [PATCH 03/45] PG-195: Change pgsm_bucket_time value from 300 to 60. Changed the default value of pgsm_bucket_time from 300 to 60. Now neither PMM users will need to adjust the default value, nor restart of PG server will be required for this purpose. --- docs/USER_GUIDE.md | 2 +- guc.c | 2 +- regression/expected/guc.out | 2 +- regression/expected/guc_1.out | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index 8cdd750..e4b5bac 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -23,7 +23,7 @@ SELECT * FROM pg_stat_monitor_settings; pg_stat_monitor.pgsm_track_utility | 1 | 1 | Selects whether utility commands are tracked. | 0 | 0 | 0 pg_stat_monitor.pgsm_normalized_query | 1 | 1 | Selects whether save query in normalized format. | 0 | 0 | 0 pg_stat_monitor.pgsm_max_buckets | 10 | 10 | Sets the maximum number of buckets. | 1 | 10 | 1 - pg_stat_monitor.pgsm_bucket_time | 300 | 300 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 + pg_stat_monitor.pgsm_bucket_time | 60 | 60 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 pg_stat_monitor.pgsm_histogram_min | 0 | 0 | Sets the time in millisecond. | 0 | 2147483647 | 1 pg_stat_monitor.pgsm_histogram_max | 100000 | 100000 | Sets the time in millisecond. 
| 10 | 2147483647 | 1 pg_stat_monitor.pgsm_histogram_buckets | 10 | 10 | Sets the maximum number of histogram buckets | 2 | 2147483647 | 1 diff --git a/guc.c b/guc.c index 5d16b33..52db4e3 100644 --- a/guc.c +++ b/guc.c @@ -104,7 +104,7 @@ init_guc(void) conf[i] = (GucVariable) { .guc_name = "pg_stat_monitor.pgsm_bucket_time", .guc_desc = "Sets the time in seconds per bucket.", - .guc_default = 300, + .guc_default = 60, .guc_min = 1, .guc_max = INT_MAX, .guc_restart = true, diff --git a/regression/expected/guc.out b/regression/expected/guc.out index 9fa592c..934c719 100644 --- a/regression/expected/guc.out +++ b/regression/expected/guc.out @@ -14,7 +14,7 @@ select pg_sleep(.5); SELECT * FROM pg_stat_monitor_settings ORDER BY name COLLATE "C"; name | value | default_value | description | minimum | maximum | restart ------------------------------------------+--------+---------------+----------------------------------------------------------------------------------------------------------+---------+------------+--------- - pg_stat_monitor.pgsm_bucket_time | 300 | 300 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 + pg_stat_monitor.pgsm_bucket_time | 60 | 60 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 pg_stat_monitor.pgsm_enable | 1 | 1 | Enable/Disable statistics collector. 
| 0 | 0 | 0 pg_stat_monitor.pgsm_enable_query_plan | 0 | 0 | Enable/Disable query plan monitoring | 0 | 0 | 0 pg_stat_monitor.pgsm_histogram_buckets | 10 | 10 | Sets the maximum number of histogram buckets | 2 | 2147483647 | 1 diff --git a/regression/expected/guc_1.out b/regression/expected/guc_1.out index 6abf062..1425109 100644 --- a/regression/expected/guc_1.out +++ b/regression/expected/guc_1.out @@ -14,7 +14,7 @@ select pg_sleep(.5); SELECT * FROM pg_stat_monitor_settings ORDER BY name COLLATE "C"; name | value | default_value | description | minimum | maximum | restart ------------------------------------------+--------+---------------+----------------------------------------------------------------------------------------------------------+---------+------------+--------- - pg_stat_monitor.pgsm_bucket_time | 300 | 300 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 + pg_stat_monitor.pgsm_bucket_time | 60 | 60 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 pg_stat_monitor.pgsm_enable | 1 | 1 | Enable/Disable statistics collector. | 0 | 0 | 0 pg_stat_monitor.pgsm_enable_query_plan | 0 | 0 | Enable/Disable query plan monitoring | 0 | 0 | 0 pg_stat_monitor.pgsm_histogram_buckets | 10 | 10 | Sets the maximum number of histogram buckets | 2 | 2147483647 | 1 From 775c087fb2a012b91d64563655977a180a6dd522 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Thu, 19 Aug 2021 15:45:06 -0400 Subject: [PATCH 04/45] PG-222: Add benchmark support for measuring hook execution time. Added a new view 'pg_stat_monitor_hook_stats' that provide execution time statistics for all hooks installed by the module, following is a description of the fields: - hook: The hook function name. - min_time: The fastest execution time recorded for the given hook. - max_time: The slowest execution time recorded for the given hook. - total_time: Total execution time taken by all calls to the hook. - avg_time: Average execution time of a call to the hook. 
- ncalls: Total number of calls to the hook. - load_comparison: A percentual of time taken by an individual hook compared to every other hook. To enable benchmark, code must be compiled with -DBENCHMARK flag, this will make the hook functions to be replaced by a function with the same name plus a '_benchmark' suffix, e.g. hook_function_benchmark. The hook_function_benchmark will call the original function and calculate the amount of time it took to execute, than it will update statistics for that hook. --- hash_query.c | 4 + pg_stat_monitor--1.0.sql | 21 +++ pg_stat_monitor.c | 307 ++++++++++++++++++++++++++++++++++++--- pg_stat_monitor.h | 72 +++++++++ 4 files changed, 381 insertions(+), 23 deletions(-) diff --git a/hash_query.c b/hash_query.c index 55d1cde..265b2a1 100644 --- a/hash_query.c +++ b/hash_query.c @@ -60,6 +60,10 @@ pgss_startup(void) ResetSharedState(pgss); } +#ifdef BENCHMARK + init_hook_stats(); +#endif + pgss->query_buf_size_bucket = MAX_QUERY_BUF / PGSM_MAX_BUCKETS; for (i = 0; i < PGSM_MAX_BUCKETS; i++) diff --git a/pg_stat_monitor--1.0.sql b/pg_stat_monitor--1.0.sql index 73153b5..f6106a2 100644 --- a/pg_stat_monitor--1.0.sql +++ b/pg_stat_monitor--1.0.sql @@ -227,6 +227,27 @@ end loop; END $$ language plpgsql; +CREATE FUNCTION pg_stat_monitor_hook_stats( + OUT hook text, + OUT min_time float8, + OUT max_time float8, + OUT total_time float8, + OUT ncalls int8 +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_monitor_hook_stats' +LANGUAGE C STRICT VOLATILE PARALLEL SAFE; + +CREATE VIEW pg_stat_monitor_hook_stats AS SELECT + hook, + min_time, + max_time, + total_time, + total_time / greatest(ncalls, 1) as avg_time, + ncalls, + ROUND(CAST(total_time / greatest(sum(total_time) OVER(), 0.00000001) * 100 as numeric), 2)::text || '%' as load_comparison +FROM pg_stat_monitor_hook_stats(); + GRANT SELECT ON pg_stat_monitor TO PUBLIC; GRANT SELECT ON pg_stat_monitor_settings TO PUBLIC; -- Don't want this to be available to non-superusers. 
diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 6f651a9..27574d2 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -16,6 +16,9 @@ */ #include "postgres.h" #include +#ifdef BENCHMARK +#include /* clock() */ +#endif #include "commands/explain.h" #include "pg_stat_monitor.h" @@ -69,6 +72,9 @@ static struct rusage rusage_start; static struct rusage rusage_end; static unsigned char *pgss_qbuf[MAX_BUCKETS]; static char *pgss_explain(QueryDesc *queryDesc); +#ifdef BENCHMARK +static struct pg_hook_stats_t *pg_hook_stats; +#endif static char *extract_query_comments(const char *query); static int get_histogram_bucket(double q_time); @@ -89,7 +95,7 @@ static ExecutorFinish_hook_type prev_ExecutorFinish = NULL; static ExecutorEnd_hook_type prev_ExecutorEnd = NULL; static ProcessUtility_hook_type prev_ProcessUtility = NULL; static emit_log_hook_type prev_emit_log_hook = NULL; -void pgsm_emit_log_hook(ErrorData *edata); +DECLARE_HOOK(void pgsm_emit_log_hook, ErrorData *edata); static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook = NULL; @@ -100,6 +106,7 @@ PG_FUNCTION_INFO_V1(pg_stat_monitor_1_3); PG_FUNCTION_INFO_V1(pg_stat_monitor); PG_FUNCTION_INFO_V1(pg_stat_monitor_settings); PG_FUNCTION_INFO_V1(get_histogram_timings); +PG_FUNCTION_INFO_V1(pg_stat_monitor_hook_stats); static uint pg_get_client_addr(void); static int pg_get_application_name(char* application_name); @@ -108,35 +115,35 @@ static Datum intarray_get_datum(int32 arr[], int len); #if PG_VERSION_NUM < 140000 -static void pgss_post_parse_analyze(ParseState *pstate, Query *query); +DECLARE_HOOK(void pgss_post_parse_analyze, ParseState *pstate, Query *query); #else -static void pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate); +DECLARE_HOOK(void pgss_post_parse_analyze, ParseState *pstate, Query *query, JumbleState *jstate); #endif -static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags); 
-static void pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count, bool execute_once); -static void pgss_ExecutorFinish(QueryDesc *queryDesc); -static void pgss_ExecutorEnd(QueryDesc *queryDesc); -static bool pgss_ExecutorCheckPerms(List *rt, bool abort); +DECLARE_HOOK(void pgss_ExecutorStart, QueryDesc *queryDesc, int eflags); +DECLARE_HOOK(void pgss_ExecutorRun, QueryDesc *queryDesc, ScanDirection direction, uint64 count, bool execute_once); +DECLARE_HOOK(void pgss_ExecutorFinish, QueryDesc *queryDesc); +DECLARE_HOOK(void pgss_ExecutorEnd, QueryDesc *queryDesc); +DECLARE_HOOK(bool pgss_ExecutorCheckPerms, List *rt, bool abort); #if PG_VERSION_NUM >= 140000 -static PlannedStmt * pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); -static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, +DECLARE_HOOK(PlannedStmt * pgss_planner_hook, Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); +DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *qc); #elif PG_VERSION_NUM >= 130000 -static PlannedStmt * pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); -static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, +DECLARE_HOOK(PlannedStmt * pgss_planner_hook, Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); +DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *qc); #else static void BufferUsageAccumDiff(BufferUsage* bufusage, BufferUsage* pgBufferUsage, BufferUsage* bufusage_start); -static void pgss_ProcessUtility(PlannedStmt 
*pstmt, const char *queryString, +DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, @@ -259,7 +266,7 @@ _PG_init(void) * the postmaster process.) We'll allocate or attach to the shared * resources in pgss_shmem_startup(). */ - RequestAddinShmemSpace(hash_memsize()); + RequestAddinShmemSpace(hash_memsize() + HOOK_STATS_SIZE); RequestNamedLWLockTranche("pg_stat_monitor", 1); /* @@ -268,24 +275,24 @@ _PG_init(void) prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = pgss_shmem_startup; prev_post_parse_analyze_hook = post_parse_analyze_hook; - post_parse_analyze_hook = pgss_post_parse_analyze; + post_parse_analyze_hook = HOOK(pgss_post_parse_analyze); prev_ExecutorStart = ExecutorStart_hook; - ExecutorStart_hook = pgss_ExecutorStart; + ExecutorStart_hook = HOOK(pgss_ExecutorStart); prev_ExecutorRun = ExecutorRun_hook; - ExecutorRun_hook = pgss_ExecutorRun; + ExecutorRun_hook = HOOK(pgss_ExecutorRun); prev_ExecutorFinish = ExecutorFinish_hook; - ExecutorFinish_hook = pgss_ExecutorFinish; + ExecutorFinish_hook = HOOK(pgss_ExecutorFinish); prev_ExecutorEnd = ExecutorEnd_hook; - ExecutorEnd_hook = pgss_ExecutorEnd; + ExecutorEnd_hook = HOOK(pgss_ExecutorEnd); prev_ProcessUtility = ProcessUtility_hook; - ProcessUtility_hook = pgss_ProcessUtility; + ProcessUtility_hook = HOOK(pgss_ProcessUtility); #if PG_VERSION_NUM >= 130000 planner_hook_next = planner_hook; - planner_hook = pgss_planner_hook; + planner_hook = HOOK(pgss_planner_hook); #endif - emit_log_hook = pgsm_emit_log_hook; + emit_log_hook = HOOK(pgsm_emit_log_hook); prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; - ExecutorCheckPerms_hook = pgss_ExecutorCheckPerms; + ExecutorCheckPerms_hook = HOOK(pgss_ExecutorCheckPerms); nested_queryids = (uint64*) malloc(sizeof(uint64) * max_stack_depth); @@ -338,6 +345,16 @@ pg_stat_monitor_version(PG_FUNCTION_ARGS) } #if 
PG_VERSION_NUM >= 140000 +#ifdef BENCHMARK +static void +pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query, JumbleState *jstate) +{ + double start_time = (double)clock(); + pgss_post_parse_analyze(pstate, query, jstate); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed); +} +#endif /* * Post-parse-analysis hook: mark query with a queryId */ @@ -383,6 +400,16 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) } #else +#ifdef BENCHMARK +static void +pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query) +{ + double start_time = (double)clock(); + pgss_post_parse_analyze(pstate, query); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed); +} +#endif /* * Post-parse-analysis hook: mark query with a queryId */ @@ -435,6 +462,16 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) } #endif +#ifdef BENCHMARK +static void +pgss_ExecutorStart_benchmark(QueryDesc *queryDesc, int eflags) +{ + double start_time = (double)clock(); + pgss_ExecutorStart(queryDesc, eflags); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORSTART, elapsed); +} +#endif /* * ExecutorStart hook: start up tracking if needed */ @@ -494,6 +531,18 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) } } +#ifdef BENCHMARK +static void +pgss_ExecutorRun_benchmark(QueryDesc *queryDesc, ScanDirection direction, uint64 count, + bool execute_once) +{ + double start_time = (double)clock(); + pgss_ExecutorRun(queryDesc, direction, count, execute_once); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORUN, elapsed); +} +#endif + /* * ExecutorRun hook: all we need do is track nesting depth */ @@ -524,6 +573,17 @@ pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, 
uint64 count, PG_END_TRY(); } +#ifdef BENCHMARK +static void +pgss_ExecutorFinish_benchmark(QueryDesc *queryDesc) +{ + double start_time = (double)clock(); + pgss_ExecutorFinish(queryDesc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORFINISH, elapsed); +} +#endif + /* * ExecutorFinish hook: all we need do is track nesting depth */ @@ -567,6 +627,17 @@ pgss_explain(QueryDesc *queryDesc) return es->str->data; } +#ifdef BENCHMARK +static void +pgss_ExecutorEnd_benchmark(QueryDesc *queryDesc) +{ + double start_time = (double)clock(); + pgss_ExecutorEnd(queryDesc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTOREND, elapsed); +} +#endif + /* * ExecutorEnd hook: store results if needed */ @@ -625,6 +696,19 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) pgss->num_relations = 0; } +#ifdef BENCHMARK +static bool +pgss_ExecutorCheckPerms_benchmark(List *rt, bool abort) +{ + bool ret; + double start_time = (double)clock(); + ret = pgss_ExecutorCheckPerms(rt, abort); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORCHECKPERMS, elapsed); + return ret; +} +#endif + static bool pgss_ExecutorCheckPerms(List *rt, bool abort) { @@ -676,6 +760,18 @@ pgss_ExecutorCheckPerms(List *rt, bool abort) } #if PG_VERSION_NUM >= 130000 +#ifdef BENCHMARK +static PlannedStmt* +pgss_planner_hook_benchmark(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) +{ + PlannedStmt *ret; + double start_time = (double)clock(); + ret = pgss_planner_hook(parse, query_string, cursorOptions, boundParams); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PLANNER_HOOK, elapsed); + return ret; +} +#endif static PlannedStmt* pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) { @@ -769,6 +865,21 @@ 
pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par * ProcessUtility hook */ #if PG_VERSION_NUM >= 140000 +#ifdef BENCHMARK +static void +pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc) +{ + double start_time = (double)clock(); + pgss_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); +} +#endif static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, @@ -777,6 +888,20 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, QueryCompletion *qc) #elif PG_VERSION_NUM >= 130000 +#ifdef BENCHMARK +static void +pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc) +{ + double start_time = (double)clock(); + pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); +} +#endif static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, @@ -784,6 +909,20 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, QueryCompletion *qc) #else +#ifdef BENCHMARK +static void +pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString, + ProcessUtilityContext context, ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + char *completionTag) +{ + double start_time = (double)clock(); 
+ pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); +} +#endif static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, @@ -1443,6 +1582,14 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS) LWLockAcquire(pgss->lock, LW_EXCLUSIVE); hash_entry_dealloc(-1); hash_query_entryies_reset(); +#ifdef BENCHMARK + for (int i = STATS_START; i < STATS_END; ++i) { + pg_hook_stats[i].min_time = 0; + pg_hook_stats[i].max_time = 0; + pg_hook_stats[i].total_time = 0; + pg_hook_stats[i].ncalls = 0; + } +#endif LWLockRelease(pgss->lock); PG_RETURN_VOID(); } @@ -2992,12 +3139,84 @@ pg_stat_monitor_settings(PG_FUNCTION_ARGS) return (Datum)0; } +Datum +pg_stat_monitor_hook_stats(PG_FUNCTION_ARGS) +{ +#ifdef BENCHMARK + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate *tupstore; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + enum pg_hook_stats_id hook_id; + + /* Safety check... 
*/ + if (!IsSystemInitialized()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("pg_stat_monitor: set-valued function called in context that cannot accept a set"))); + + /* Switch into long-lived context to construct returned data structures */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "pg_stat_monitor: return type must be a row type"); + + if (tupdesc->natts != 5) + elog(ERROR, "pg_stat_monitor: incorrect number of output arguments, required %d", tupdesc->natts); + + tupstore = tuplestore_begin_heap(true, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + MemoryContextSwitchTo(oldcontext); + + for (hook_id = 0; hook_id < STATS_END; hook_id++) + { + Datum values[5]; + bool nulls[5]; + int j = 0; + memset(values, 0, sizeof(values)); + memset(nulls, 0, sizeof(nulls)); + + values[j++] = CStringGetTextDatum(pg_hook_stats[hook_id].hook_name); + values[j++] = Float8GetDatumFast(pg_hook_stats[hook_id].min_time); + values[j++] = Float8GetDatumFast(pg_hook_stats[hook_id].max_time); + values[j++] = Float8GetDatumFast(pg_hook_stats[hook_id].total_time); + values[j++] = Int64GetDatumFast(pg_hook_stats[hook_id].ncalls); + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); +#endif /* #ifdef BENCHMARK */ + return (Datum)0; +} + void set_qbuf(int i, unsigned char *buf) { pgss_qbuf[i] = buf; } +#ifdef BENCHMARK +static void 
+pgsm_emit_log_hook_benchmark(ErrorData *edata) +{ + double start_time = (double)clock(); + pgsm_emit_log_hook(edata); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSM_EMIT_LOG_HOOK, elapsed); +} +#endif void pgsm_emit_log_hook(ErrorData *edata) { @@ -3247,3 +3466,45 @@ static uint64 djb2_hash(unsigned char *str, size_t len) return hash; } + +#ifdef BENCHMARK +void init_hook_stats(void) +{ + bool found = false; + pg_hook_stats = ShmemInitStruct("pg_stat_monitor_hook_stats", HOOK_STATS_SIZE, &found); + if (!found) + { + memset(pg_hook_stats, 0, HOOK_STATS_SIZE); + +#define SET_HOOK_NAME(hook, name) \ + snprintf(pg_hook_stats[hook].hook_name, sizeof(pg_hook_stats->hook_name), name); + + SET_HOOK_NAME(STATS_PGSS_POST_PARSE_ANALYZE, "pgss_post_parse_analyze"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORSTART, "pgss_ExecutorStart"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORUN, "pgss_ExecutorRun"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORFINISH, "pgss_ExecutorFinish"); + SET_HOOK_NAME(STATS_PGSS_EXECUTOREND, "pgss_ExecutorEnd"); + SET_HOOK_NAME(STATS_PGSS_PROCESSUTILITY, "pgss_ProcessUtility"); +#if PG_VERSION_NUM >= 130000 + SET_HOOK_NAME(STATS_PGSS_PLANNER_HOOK, "pgss_planner_hook"); +#endif + SET_HOOK_NAME(STATS_PGSM_EMIT_LOG_HOOK, "pgsm_emit_log_hook"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORCHECKPERMS, "pgss_ExecutorCheckPerms"); + } +} + +void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed) +{ + Assert(hook_id > STATS_START && hook_id < STATS_END); + + struct pg_hook_stats_t *p = &pg_hook_stats[hook_id]; + if (time_elapsed < p->min_time) + p->min_time = time_elapsed; + + if (time_elapsed > p->max_time) + p->max_time = time_elapsed; + + p->total_time += time_elapsed; + p->ncalls++; +} +#endif \ No newline at end of file diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 3717849..1f9e6d4 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -413,4 +413,76 @@ void pgss_startup(void); #define PGSM_QUERY_PLAN 
get_conf(12)->guc_variable #define PGSM_TRACK_PLANNING get_conf(13)->guc_variable +/*---- Benchmarking ----*/ +#ifdef BENCHMARK +/* + * These enumerator values are used as index in the hook stats array. + * STATS_START and STATS_END are used only to delimit the range. + * STATS_END is also the length of the valid items in the enum. + */ +enum pg_hook_stats_id { + STATS_START = -1, + STATS_PGSS_POST_PARSE_ANALYZE, + STATS_PGSS_EXECUTORSTART, + STATS_PGSS_EXECUTORUN, + STATS_PGSS_EXECUTORFINISH, + STATS_PGSS_EXECUTOREND, + STATS_PGSS_PROCESSUTILITY, +#if PG_VERSION_NUM >= 130000 + STATS_PGSS_PLANNER_HOOK, +#endif + STATS_PGSM_EMIT_LOG_HOOK, + STATS_PGSS_EXECUTORCHECKPERMS, + STATS_END +}; + +/* Hold time to execute statistics for a hook. */ +struct pg_hook_stats_t { + char hook_name[64]; + double min_time; + double max_time; + double total_time; + uint64 ncalls; +}; + +#define HOOK_STATS_SIZE MAXALIGN((size_t)STATS_END * sizeof(struct pg_hook_stats_t)) + +/* Allocate a pg_hook_stats_t array of size HOOK_STATS_SIZE on shared memory. */ +void init_hook_stats(void); + +/* Update hook time execution statistics. */ +void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed); + +/* + * Macro used to declare a hook function: + * Example: + * DECLARE_HOOK(void my_hook, const char *query, size_t length); + * Will expand to: + * static void my_hook(const char *query, size_t length); + * static void my_hook_benchmark(const char *query, size_t length); + */ +#define DECLARE_HOOK(hook, ...) \ + static hook(__VA_ARGS__); \ + static hook##_benchmark(__VA_ARGS__); + +/* + * Macro used to wrap a hook when pg_stat_monitor is compiled with -DBENCHMARK. 
+ * + * It is intended to be used as follows in _PG_init(): + * pg_hook_function = HOOK(my_hook_function); + * Then, if pg_stat_monitor is compiled with -DBENCHMARK this will expand to: + * pg_hook_name = my_hook_function_benchmark; + * Otherwise it will simply expand to: + * pg_hook_name = my_hook_function; + */ +#define HOOK(name) name##_benchmark + +#else /* #ifdef BENCHMARK */ + +#define DECLARE_HOOK(hook, ...) \ + static hook(__VA_ARGS__); +#define HOOK(name) name +#define HOOK_STATS_SIZE 0 +#endif + #endif From a847ed95de9bc3550ff6783d9aec2c81e064f6ff Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Fri, 27 Aug 2021 15:53:11 -0400 Subject: [PATCH 05/45] PG-223: Remove relations and num_relations from pgssSharedState. These variables can't be in shared state, as the following problem was taking place: 1. Process1 call pgss_ExecutorCheckPerms(), acquire lock, update relations and num_relations, release lock. 2. Process 2 call pgss_ExecutorCheckPerms(), acquire lock, update num_relations = 0; 3. Process 1 read num_relations = 0 in pgss_update_entry, this value is wrong as it was updated by Process 2. Even if we acquire the lock in pgss_update_entry to read num_relations and relations variable, Process 1 may end up acquiring the lock after Process 2 has overwritten the variable values, leading to Process 1 reading of wrong data. By defining relations and num_relations to be static and global in pg_stat_monitor.c we take advantage that each individual PostgreSQL backend will have its own copy of this data, which allows us to remove the locking in pgss_ExecutorCheckPerms to update these variables, improving pg_stat_monitor overall performance. 
--- pg_stat_monitor.c | 22 +++++++++------------- pg_stat_monitor.h | 2 -- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 9a4cea9..2876e1b 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -66,7 +66,8 @@ static int plan_nested_level = 0; /* The array to store outer layer query id*/ uint64 *nested_queryids; -FILE *qfile; +static char relations[REL_LST][REL_LEN]; +static int num_relations; /* Number of relation in the query */ static bool system_init = false; static struct rusage rusage_start; static struct rusage rusage_end; @@ -645,7 +646,6 @@ static void pgss_ExecutorEnd(QueryDesc *queryDesc) { uint64 queryId = queryDesc->plannedstmt->queryId; - pgssSharedState *pgss = pgsm_get_ss(); SysInfo sys_info; PlanInfo plan_info; @@ -693,7 +693,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) prev_ExecutorEnd(queryDesc); else standard_ExecutorEnd(queryDesc); - pgss->num_relations = 0; + num_relations = 0; } #ifdef BENCHMARK @@ -713,13 +713,11 @@ static bool pgss_ExecutorCheckPerms(List *rt, bool abort) { ListCell *lr = NULL; - pgssSharedState *pgss = pgsm_get_ss(); int i = 0; int j = 0; Oid list_oid[20]; - LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - pgss->num_relations = 0; + num_relations = 0; foreach(lr, rt) { @@ -744,14 +742,13 @@ pgss_ExecutorCheckPerms(List *rt, bool abort) namespace_name = get_namespace_name(get_rel_namespace(rte->relid)); relation_name = get_rel_name(rte->relid); if (rte->relkind == 'v') - snprintf(pgss->relations[i++], REL_LEN, "%s.%s*", namespace_name, relation_name); + snprintf(relations[i++], REL_LEN, "%s.%s*", namespace_name, relation_name); else - snprintf(pgss->relations[i++], REL_LEN, "%s.%s", namespace_name, relation_name); + snprintf(relations[i++], REL_LEN, "%s.%s", namespace_name, relation_name); } } } - pgss->num_relations = i; - LWLockRelease(pgss->lock); + num_relations = i; if (prev_ExecutorCheckPerms_hook) return prev_ExecutorCheckPerms_hook(rt, abort); @@ -1184,7 +1181,6 
@@ pgss_update_entry(pgssEntry *entry, int index; char application_name[APPLICATIONNAME_LEN]; int application_name_len = pg_get_application_name(application_name); - pgssSharedState *pgss = pgsm_get_ss(); double old_mean; int message_len = error_info ? strlen (error_info->message) : 0; int comments_len = comments ? strlen (comments) : 0; @@ -1255,8 +1251,8 @@ pgss_update_entry(pgssEntry *entry, _snprintf(e->counters.planinfo.plan_text, plan_info->plan_text, plan_text_len, PLAN_TEXT_LEN); _snprintf(e->counters.info.application_name, application_name, application_name_len, APPLICATIONNAME_LEN); - e->counters.info.num_relations = pgss->num_relations; - _snprintf2(e->counters.info.relations, pgss->relations, pgss->num_relations, REL_LEN); + e->counters.info.num_relations = num_relations; + _snprintf2(e->counters.info.relations, relations, num_relations, REL_LEN); e->counters.info.cmd_type = cmd_type; diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 1f9e6d4..62c33b9 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -310,8 +310,6 @@ typedef struct pgssSharedState uint64 prev_bucket_usec; uint64 bucket_entry[MAX_BUCKETS]; int64 query_buf_size_bucket; - char relations[REL_LST][REL_LEN]; - int num_relations; /* Number of relation in the query */ char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ } pgssSharedState; From 58f707c6d95f94e2fb5531680490e1fdf4d1c2e6 Mon Sep 17 00:00:00 2001 From: Mikhail Samoylov Date: Sat, 28 Aug 2021 00:05:52 +0300 Subject: [PATCH 06/45] Add PPG job --- .github/workflows/ppg11test.yml | 62 +++++++++++++++++++++++++++++++++ .github/workflows/ppg12test.yml | 62 +++++++++++++++++++++++++++++++++ .github/workflows/ppg13test.yml | 62 +++++++++++++++++++++++++++++++++ README.md | 3 ++ 4 files changed, 189 insertions(+) create mode 100644 .github/workflows/ppg11test.yml create mode 100644 .github/workflows/ppg12test.yml create mode 100644 .github/workflows/ppg13test.yml diff --git a/.github/workflows/ppg11test.yml 
b/.github/workflows/ppg11test.yml new file mode 100644 index 0000000..47987a3 --- /dev/null +++ b/.github/workflows/ppg11test.yml @@ -0,0 +1,62 @@ +name: ppg11-test +on: [push] + +jobs: + build: + name: ppg11-test + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install percona-release script + run: | + sudo apt-get install gnupg2 wget -y + sudo wget https://repo.percona.com/apt/pool/testing/p/percona-release/percona-release_1.0-27.generic_all.deb + sudo dpkg -i percona-release_1.0-27.generic_all.deb + + - name: Install Percona Distribution Postgresql 11 + run: | + sudo percona-release setup ppg-11 + sudo apt-get update -y + sudo apt-get install -y percona-postgresql percona-postgresql-11 percona-postgresql-all percona-postgresql-client-11 percona-postgresql-client-common percona-postgresql-common percona-postgresql-contrib percona-postgresql-server-dev-all + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/11/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + 
name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/ppg12test.yml b/.github/workflows/ppg12test.yml new file mode 100644 index 0000000..1224b86 --- /dev/null +++ b/.github/workflows/ppg12test.yml @@ -0,0 +1,62 @@ +name: ppg12-test +on: [push] + +jobs: + build: + name: ppg12-test + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install percona-release script + run: | + sudo apt-get install gnupg2 wget -y + sudo wget https://repo.percona.com/apt/pool/testing/p/percona-release/percona-release_1.0-27.generic_all.deb + sudo dpkg -i percona-release_1.0-27.generic_all.deb + + - name: Install Percona Distribution Postgresql 12 + run: | + sudo percona-release setup ppg-12 + sudo apt-get update -y + sudo apt-get install -y percona-postgresql percona-postgresql-12 percona-postgresql-all percona-postgresql-client-12 percona-postgresql-client-common percona-postgresql-common percona-postgresql-contrib percona-postgresql-server-dev-all + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/12/main/postgresql.conf + sudo service postgresql start + sudo -u 
postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/ppg13test.yml b/.github/workflows/ppg13test.yml new file mode 100644 index 0000000..713432c --- /dev/null +++ b/.github/workflows/ppg13test.yml @@ -0,0 +1,62 @@ +name: ppg13-test +on: [push] + +jobs: + build: + name: ppg13-test + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install percona-release script + run: | + sudo apt-get install gnupg2 wget -y + sudo wget https://repo.percona.com/apt/pool/testing/p/percona-release/percona-release_1.0-27.generic_all.deb + sudo dpkg -i percona-release_1.0-27.generic_all.deb + + - name: Install Percona Distribution Postgresql 13 + run: | + sudo percona-release setup ppg-13 + sudo apt-get update -y + sudo apt-get install -y percona-postgresql percona-postgresql-13 percona-postgresql-all percona-postgresql-client-13 percona-postgresql-client-common percona-postgresql-common percona-postgresql-contrib percona-postgresql-server-dev-all + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + 
run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/13/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/README.md b/README.md index 263ea19..02510b3 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,9 @@ ![pg12-test](https://github.com/percona/pg_stat_monitor/workflows/pg12-test/badge.svg) ![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) ![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) +![ppg11-test](https://github.com/percona/pg_stat_monitor/workflows/ppg11-test/badge.svg) +![ppg12-test](https://github.com/percona/pg_stat_monitor/workflows/ppg12-test/badge.svg) +![ppg13-test](https://github.com/percona/pg_stat_monitor/workflows/ppg13-test/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/percona/pg_stat_monitor/badge.svg)](https://coveralls.io/github/percona/pg_stat_monitor) From 549347025d47735725f97d00514acb1eb2d0c8b2 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Fri, 27 Aug 2021 16:12:07 -0400 Subject: [PATCH 07/45] PG-223: Use memcpy and strlcpy to copy relations to counters. We redefine macro _snprintf to use memcpy, which performs better, we also update call sites using this macro to add the null terminator '\0' to the source string length, this way memcpy also correctly copies the null terminator to the destination string. 
We update _snprintf2 macro to use strlcpy, the reason we don't use memcpy here is because in the place where this macro is called, pgss_update_entry, only the maximum string length of REL_LEN=1000 is specified as an upper bound to copy the relations string vector to the destination counters, but since this data is string, we don't need to copy 1k bytes for every entry, by using strlcpy the copy ends as soon as the null terminator '\0' is found in the source string. --- pg_stat_monitor.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 2876e1b..4766de8 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -31,24 +31,14 @@ PG_MODULE_MAGIC; #define PGUNSIXBIT(val) (((val) & 0x3F) + '0') #define _snprintf(_str_dst, _str_src, _len, _max_len)\ -do \ -{ \ - int i; \ - for(i = 0; i < _len && i < _max_len; i++) \ - {\ - _str_dst[i] = _str_src[i]; \ - }\ -}while(0) + memcpy((void *)_str_dst, _str_src, _len < _max_len ? 
_len : _max_len) #define _snprintf2(_str_dst, _str_src, _len1, _len2)\ do \ { \ - int i,j; \ + int i; \ for(i = 0; i < _len1; i++) \ - for(j = 0; j < _len2; j++) \ - { \ - _str_dst[i][j] = _str_src[i][j]; \ - } \ + strlcpy((char *)_str_dst[i], _str_src[i], _len2); \ }while(0) /*---- Initicalization Function Declarations ----*/ @@ -1196,7 +1186,8 @@ pgss_update_entry(pgssEntry *entry, if (reset) memset(&entry->counters, 0, sizeof(Counters)); - _snprintf(e->counters.info.comments, comments, comments_len, COMMENTS_LEN); + if (comments_len > 0) + _snprintf(e->counters.info.comments, comments, comments_len + 1, COMMENTS_LEN); e->counters.state = kind; if (kind == PGSS_PLAN) { @@ -1248,8 +1239,11 @@ pgss_update_entry(pgssEntry *entry, e->counters.resp_calls[index]++; } - _snprintf(e->counters.planinfo.plan_text, plan_info->plan_text, plan_text_len, PLAN_TEXT_LEN); - _snprintf(e->counters.info.application_name, application_name, application_name_len, APPLICATIONNAME_LEN); + if (plan_text_len > 0) + _snprintf(e->counters.planinfo.plan_text, plan_info->plan_text, plan_text_len + 1, PLAN_TEXT_LEN); + + if (application_name_len > 0) + _snprintf(e->counters.info.application_name, application_name, application_name_len + 1, APPLICATIONNAME_LEN); e->counters.info.num_relations = num_relations; _snprintf2(e->counters.info.relations, relations, num_relations, REL_LEN); From aee45ebe5277898964a84180d4cb75c647b0be4e Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Tue, 31 Aug 2021 12:10:11 +0000 Subject: [PATCH 08/45] PG-221: Use alternate of GetUserID function in error hook. 
--- pg_stat_monitor.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 4766de8..c639e34 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1477,7 +1477,8 @@ pgss_store(uint64 queryid, int application_name_len = pg_get_application_name(application_name); bool reset = false; uint64 bucketid; - uint64 userid = GetUserId(); + uint64 userid; + int con; uint64 dbid = MyDatabaseId; uint64 ip = pg_get_client_addr(); uint64 planid = plan_info ? plan_info->planid: 0; @@ -1488,6 +1489,9 @@ pgss_store(uint64 queryid, return; Assert(query != NULL); + GetUserIdAndSecContext((unsigned int *)&userid, &con); + if (userid == 0) + return; comments = extract_query_comments(query); @@ -3496,4 +3500,4 @@ void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed) p->total_time += time_elapsed; p->ncalls++; } -#endif \ No newline at end of file +#endif From 3d3ece2f993dbf301e906fe151ae315983cb6d96 Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Tue, 31 Aug 2021 14:55:53 +0000 Subject: [PATCH 09/45] =?UTF-8?q?PG-221:=20Use=20alternate=20of=20GetUserI?= =?UTF-8?q?D=20function=C2=A0in=20error=20hook.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pg_stat_monitor.c | 9 +++++---- regression/expected/error.out | 30 +++++++++++++++++------------- regression/sql/error.sql | 2 +- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index c639e34..a02a0d0 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1489,11 +1489,12 @@ pgss_store(uint64 queryid, return; Assert(query != NULL); - GetUserIdAndSecContext((unsigned int *)&userid, &con); - if (userid == 0) - return; + if (kind == PGSS_ERROR) + GetUserIdAndSecContext((unsigned int *)&userid, &con); + else + userid = GetUserId(); - comments = extract_query_comments(query); + comments = extract_query_comments(query); /* Safety check... 
*/ if (!IsSystemInitialized() || !pgss_qbuf[pgss->current_wbucket]) diff --git a/regression/expected/error.out b/regression/expected/error.out index 1c278c4..b901e14 100644 --- a/regression/expected/error.out +++ b/regression/expected/error.out @@ -20,19 +20,23 @@ BEGIN RAISE WARNING 'warning message'; END $$; WARNING: warning message -SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | elevel | sqlcode | message ------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 20 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | -(6 rows) +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 20 | 22012 | division by zero + SELECT pg_stat_monitor_reset(); | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | + do $$ +| 0 | | + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | +(7 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset diff --git a/regression/sql/error.sql 
b/regression/sql/error.sql index 5102782..98870f2 100644 --- a/regression/sql/error.sql +++ b/regression/sql/error.sql @@ -9,6 +9,6 @@ BEGIN RAISE WARNING 'warning message'; END $$; -SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; SELECT pg_stat_monitor_reset(); DROP EXTENSION pg_stat_monitor; From b5aa300e8222589bfd9a2acd7625efc60e97b5bd Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Tue, 31 Aug 2021 18:29:22 -0400 Subject: [PATCH 10/45] pg-224: Fix memory leak on extract_query_comments. There were two objects leaking memory in this function, the comments variable of type char * allocated using palloc0, and the regular expression object preg. If the regcomp function failed, the function was returning without releasing the comments variable allocated previously. If the regexec function failed, the function was returning without releasing the preg object and the comments variable. This commit does two changes, first it turns the comments in extract_query_comments an argument to the function, in pgss_store we declare the comments variable in the stack, so it will clean up after itself. The second change was to move the regular expression object to global scope, this way we compile the object only once, during module initialization. With these two changes we fix the memory leak and avoid allocating/releasing memory for every call to this function. --- pg_stat_monitor.c | 46 +++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index a02a0d0..5461149 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -56,6 +56,8 @@ static int plan_nested_level = 0; /* The array to store outer layer query id*/ uint64 *nested_queryids; +/* Regex object used to extract query comments. 
*/ +static regex_t preg_query_comments; static char relations[REL_LST][REL_LEN]; static int num_relations; /* Number of relation in the query */ static bool system_init = false; @@ -67,7 +69,7 @@ static char *pgss_explain(QueryDesc *queryDesc); static struct pg_hook_stats_t *pg_hook_stats; #endif -static char *extract_query_comments(const char *query); +static void extract_query_comments(const char *query, char *comments, size_t max_len); static int get_histogram_bucket(double q_time); static bool IsSystemInitialized(void); static void dump_queries_buffer(int bucket_id, unsigned char *buf, int buf_len); @@ -219,7 +221,8 @@ static uint64 djb2_hash(unsigned char *str, size_t len); void _PG_init(void) { - int i; + int i, rc; + elog(DEBUG2, "pg_stat_monitor: %s()", __FUNCTION__); /* * In order to create our shared memory area, we have to be loaded via @@ -252,6 +255,15 @@ _PG_init(void) EmitWarningsOnPlaceholders("pg_stat_monitor"); + /* + * Compile regular expression for extracting out query comments only once. + */ + rc = regcomp(&preg_query_comments, "/\\*.*\\*/", 0); + if (rc != 0) + { + elog(ERROR, "pg_stat_monitor: query comments regcomp() failed, return code=(%d)\n", rc); + } + /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared @@ -307,6 +319,7 @@ _PG_fini(void) ProcessUtility_hook = prev_ProcessUtility; free(nested_queryids); + regfree(&preg_query_comments); hash_entry_reset(); } @@ -1483,7 +1496,7 @@ pgss_store(uint64 queryid, uint64 ip = pg_get_client_addr(); uint64 planid = plan_info ? 
plan_info->planid: 0; uint64 appid = djb2_hash((unsigned char *)application_name, application_name_len); - char *comments; + char comments[512] = ""; /* Monitoring is disabled */ if (!PGSM_ENABLED) return; @@ -1494,7 +1507,7 @@ pgss_store(uint64 queryid, else userid = GetUserId(); - comments = extract_query_comments(query); + extract_query_comments(query, comments, sizeof(comments)); /* Safety check... */ if (!IsSystemInitialized() || !pgss_qbuf[pgss->current_wbucket]) @@ -3407,29 +3420,20 @@ get_histogram_timings(PG_FUNCTION_ARGS) return CStringGetTextDatum(text_str); } -char * -extract_query_comments(const char *query) +static void +extract_query_comments(const char *query, char *comments, size_t max_len) { - regex_t preg; - char *pattern = "/\\*.*\\*/"; int rc; size_t nmatch = 1; regmatch_t pmatch; - char *comments = palloc0(512); - rc = regcomp(&preg, pattern, 0); - if (rc != 0) - { - printf("regcomp() failed, returning nonzero (%d)\n", rc); - return ""; - } - rc = regexec(&preg, query, nmatch, &pmatch, 0); - if (rc != 0) - return ""; - sprintf(comments, "%.*s", pmatch.rm_eo - pmatch.rm_so -4, &query[pmatch.rm_so + 2]); - regfree(&preg); - return comments; + rc = regexec(&preg_query_comments, query, nmatch, &pmatch, 0); + if (rc != 0) + return; + + snprintf(comments, max_len, "%.*s", pmatch.rm_eo - pmatch.rm_so -4, &query[pmatch.rm_so + 2]); } + #if PG_VERSION_NUM < 140000 static uint64 get_query_id(JumbleState *jstate, Query *query) From 7a543bafb08ea04a055c7e69865c7b610917cf13 Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Wed, 1 Sep 2021 09:55:35 +0000 Subject: [PATCH 11/45] Regression output fix. 
--- regression/expected/error_1.out | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/regression/expected/error_1.out b/regression/expected/error_1.out index 7a3f002..102a92a 100644 --- a/regression/expected/error_1.out +++ b/regression/expected/error_1.out @@ -20,18 +20,18 @@ BEGIN RAISE WARNING 'warning message'; END $$; WARNING: warning message -SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | elevel | sqlcode | message ------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 21 | 42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 21 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 21 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 21 | 22012 | division by zero + SELECT pg_stat_monitor_reset(); | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | (6 rows) SELECT pg_stat_monitor_reset(); From bd2ebf2a5b15357bc367a5ab2453c367a5d272b9 Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Wed, 1 Sep 
2021 10:25:42 +0000 Subject: [PATCH 12/45] Regression output fix. --- regression/expected/error_1.out | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/regression/expected/error_1.out b/regression/expected/error_1.out index 102a92a..8a8dd8b 100644 --- a/regression/expected/error_1.out +++ b/regression/expected/error_1.out @@ -28,11 +28,15 @@ SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLA SELECT 1/0; | 21 | 22012 | division by zero SELECT pg_stat_monitor_reset(); | 0 | | SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | + do $$ +| 0 | | + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | do $$ +| 19 | 01000 | warning message BEGIN +| | | RAISE WARNING 'warning message'; +| | | END $$; | | | -(6 rows) +(7 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset From 81a2c705b684a805c4fbc20adf7d2d3203edcab3 Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Wed, 1 Sep 2021 11:27:19 +0000 Subject: [PATCH 13/45] Removing commit (4d1c2e6), because it must be in its branch. 
--- .github/workflows/ppg11test.yml | 62 --------------------------------- .github/workflows/ppg12test.yml | 62 --------------------------------- .github/workflows/ppg13test.yml | 62 --------------------------------- README.md | 3 -- 4 files changed, 189 deletions(-) delete mode 100644 .github/workflows/ppg11test.yml delete mode 100644 .github/workflows/ppg12test.yml delete mode 100644 .github/workflows/ppg13test.yml diff --git a/.github/workflows/ppg11test.yml b/.github/workflows/ppg11test.yml deleted file mode 100644 index 47987a3..0000000 --- a/.github/workflows/ppg11test.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: ppg11-test -on: [push] - -jobs: - build: - name: ppg11-test - runs-on: ubuntu-latest - steps: - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - - name: Delete old postgresql files - run: | - sudo apt-get update - sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo rm -rf /var/lib/postgresql/ - sudo rm -rf /var/log/postgresql/ - sudo rm -rf /etc/postgresql/ - sudo rm -rf /usr/lib/postgresql - sudo rm -rf /usr/include/postgresql - sudo rm -rf /usr/share/postgresql - sudo rm -rf /etc/postgresql - sudo rm -f /usr/bin/pg_config - - - name: Install percona-release script - run: | - sudo apt-get install gnupg2 wget -y - sudo wget https://repo.percona.com/apt/pool/testing/p/percona-release/percona-release_1.0-27.generic_all.deb - sudo dpkg -i percona-release_1.0-27.generic_all.deb - - - name: Install Percona Distribution Postgresql 11 - run: | - sudo percona-release setup ppg-11 - sudo apt-get update -y - sudo apt-get install -y percona-postgresql percona-postgresql-11 percona-postgresql-all percona-postgresql-client-11 percona-postgresql-client-common percona-postgresql-common percona-postgresql-contrib percona-postgresql-server-dev-all - sudo chown -R postgres:postgres src/ - - - name: Build pg_stat_monitor - run: | - sudo make USE_PGXS=1 - sudo make USE_PGXS=1 
install - working-directory: src/pg_stat_monitor/ - - - name: Start pg_stat_monitor_tests - run: | - sudo service postgresql stop - echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/11/main/postgresql.conf - sudo service postgresql start - sudo -u postgres bash -c 'make installcheck USE_PGXS=1' - working-directory: src/pg_stat_monitor/ - - - name: Report on test fail - uses: actions/upload-artifact@v2 - if: ${{ failure() }} - with: - name: Regressions diff and postgresql log - path: | - src/pg_stat_monitor/regression.diffs - src/pg_stat_monitor/logfile - retention-days: 1 diff --git a/.github/workflows/ppg12test.yml b/.github/workflows/ppg12test.yml deleted file mode 100644 index 1224b86..0000000 --- a/.github/workflows/ppg12test.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: ppg12-test -on: [push] - -jobs: - build: - name: ppg12-test - runs-on: ubuntu-latest - steps: - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - - name: Delete old postgresql files - run: | - sudo apt-get update - sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo rm -rf /var/lib/postgresql/ - sudo rm -rf /var/log/postgresql/ - sudo rm -rf /etc/postgresql/ - sudo rm -rf /usr/lib/postgresql - sudo rm -rf /usr/include/postgresql - sudo rm -rf /usr/share/postgresql - sudo rm -rf /etc/postgresql - sudo rm -f /usr/bin/pg_config - - - name: Install percona-release script - run: | - sudo apt-get install gnupg2 wget -y - sudo wget https://repo.percona.com/apt/pool/testing/p/percona-release/percona-release_1.0-27.generic_all.deb - sudo dpkg -i percona-release_1.0-27.generic_all.deb - - - name: Install Percona Distribution Postgresql 12 - run: | - sudo percona-release setup ppg-12 - sudo apt-get update -y - sudo apt-get install -y percona-postgresql percona-postgresql-12 percona-postgresql-all percona-postgresql-client-12 percona-postgresql-client-common percona-postgresql-common 
percona-postgresql-contrib percona-postgresql-server-dev-all - sudo chown -R postgres:postgres src/ - - - name: Build pg_stat_monitor - run: | - sudo make USE_PGXS=1 - sudo make USE_PGXS=1 install - working-directory: src/pg_stat_monitor/ - - - name: Start pg_stat_monitor_tests - run: | - sudo service postgresql stop - echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/12/main/postgresql.conf - sudo service postgresql start - sudo -u postgres bash -c 'make installcheck USE_PGXS=1' - working-directory: src/pg_stat_monitor/ - - - name: Report on test fail - uses: actions/upload-artifact@v2 - if: ${{ failure() }} - with: - name: Regressions diff and postgresql log - path: | - src/pg_stat_monitor/regression.diffs - src/pg_stat_monitor/logfile - retention-days: 1 diff --git a/.github/workflows/ppg13test.yml b/.github/workflows/ppg13test.yml deleted file mode 100644 index 713432c..0000000 --- a/.github/workflows/ppg13test.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: ppg13-test -on: [push] - -jobs: - build: - name: ppg13-test - runs-on: ubuntu-latest - steps: - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - - name: Delete old postgresql files - run: | - sudo apt-get update - sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo rm -rf /var/lib/postgresql/ - sudo rm -rf /var/log/postgresql/ - sudo rm -rf /etc/postgresql/ - sudo rm -rf /usr/lib/postgresql - sudo rm -rf /usr/include/postgresql - sudo rm -rf /usr/share/postgresql - sudo rm -rf /etc/postgresql - sudo rm -f /usr/bin/pg_config - - - name: Install percona-release script - run: | - sudo apt-get install gnupg2 wget -y - sudo wget https://repo.percona.com/apt/pool/testing/p/percona-release/percona-release_1.0-27.generic_all.deb - sudo dpkg -i percona-release_1.0-27.generic_all.deb - - - name: Install Percona Distribution Postgresql 13 - run: | - sudo percona-release setup ppg-13 - sudo 
apt-get update -y - sudo apt-get install -y percona-postgresql percona-postgresql-13 percona-postgresql-all percona-postgresql-client-13 percona-postgresql-client-common percona-postgresql-common percona-postgresql-contrib percona-postgresql-server-dev-all - sudo chown -R postgres:postgres src/ - - - name: Build pg_stat_monitor - run: | - sudo make USE_PGXS=1 - sudo make USE_PGXS=1 install - working-directory: src/pg_stat_monitor/ - - - name: Start pg_stat_monitor_tests - run: | - sudo service postgresql stop - echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/13/main/postgresql.conf - sudo service postgresql start - sudo -u postgres bash -c 'make installcheck USE_PGXS=1' - working-directory: src/pg_stat_monitor/ - - - name: Report on test fail - uses: actions/upload-artifact@v2 - if: ${{ failure() }} - with: - name: Regressions diff and postgresql log - path: | - src/pg_stat_monitor/regression.diffs - src/pg_stat_monitor/logfile - retention-days: 1 diff --git a/README.md b/README.md index 02510b3..263ea19 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,6 @@ ![pg12-test](https://github.com/percona/pg_stat_monitor/workflows/pg12-test/badge.svg) ![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) ![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) -![ppg11-test](https://github.com/percona/pg_stat_monitor/workflows/ppg11-test/badge.svg) -![ppg12-test](https://github.com/percona/pg_stat_monitor/workflows/ppg12-test/badge.svg) -![ppg13-test](https://github.com/percona/pg_stat_monitor/workflows/ppg13-test/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/percona/pg_stat_monitor/badge.svg)](https://coveralls.io/github/percona/pg_stat_monitor) From b8198278cd2c5af47720dc237dbacadb6ffb4c06 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Tue, 27 Jul 2021 14:16:17 -0400 Subject: [PATCH 14/45] PG-204: Fix save previous emit_log_hook. 
pg_stat_monitor was not saving the pointer to the previous hook for emit_log_hook, this commit fix the issue. --- pg_stat_monitor.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 5461149..95a3efe 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -293,9 +293,10 @@ _PG_init(void) planner_hook_next = planner_hook; planner_hook = HOOK(pgss_planner_hook); #endif - emit_log_hook = HOOK(pgsm_emit_log_hook); + prev_emit_log_hook = emit_log_hook; + emit_log_hook = HOOK(pgsm_emit_log_hook); prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; - ExecutorCheckPerms_hook = HOOK(pgss_ExecutorCheckPerms); + ExecutorCheckPerms_hook = HOOK(pgss_ExecutorCheckPerms); nested_queryids = (uint64*) malloc(sizeof(uint64) * max_stack_depth); @@ -317,6 +318,7 @@ _PG_fini(void) ExecutorFinish_hook = prev_ExecutorFinish; ExecutorEnd_hook = prev_ExecutorEnd; ProcessUtility_hook = prev_ProcessUtility; + emit_log_hook = prev_emit_log_hook; free(nested_queryids); regfree(&preg_query_comments); From eafb2e89a8657b58771a8e776868611b8c591e64 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Fri, 3 Sep 2021 14:11:58 -0400 Subject: [PATCH 15/45] PG-225: Fix deadlock in pgss_store. The deadlock scenario is describe below: 1. pgss_store is called, it acquires the lock pgss->lock. 2. An error ocurr, mostly out of memory when accessing internal hash tables used to store internal data, on functions pgss_store_query_info and pgss_get_entry. 3. Call elog() to report out of memory error. 4. Our pgsm_emit_log_hook is called, it calls pgss_store_error, which in turn calls pgss_store. 5. Try to acquire already acquired lock pgss->lock, deadlock happens. To fix the problem, there are two modifications worth mentioning done by this commit: 1. 
We are now passing HASH_ENTER_NULL flag to hash_search, instead of HASH_ENTER, as read in postgresql sources, this prevents this function from reporting error when out of memory, but instead it will only return NULL if we pass HASH_ENTER_NULL, so we can handle the error ourselves. 2. In pgss_store, if an error happens after the pgss->lock is acquired, we only set a flag, then, after releasing the lock, we check if the flag is set and report the error accordingly. --- hash_query.c | 4 ++-- pg_stat_monitor.c | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/hash_query.c b/hash_query.c index 265b2a1..b74cada 100644 --- a/hash_query.c +++ b/hash_query.c @@ -147,7 +147,7 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key,int encoding) return NULL; } /* Find or create an entry with desired hash code */ - entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found); + entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER_NULL, &found); if (!found) { pgss->bucket_entry[pgss->current_wbucket]++; @@ -285,7 +285,7 @@ hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 us key.ip = ip; key.appid = appid; - entry = (pgssQueryEntry *) hash_search(pgss_query_hash, &key, HASH_ENTER, &found); + entry = (pgssQueryEntry *) hash_search(pgss_query_hash, &key, HASH_ENTER_NULL, &found); return entry; } diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 95a3efe..c9a8fe4 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1499,6 +1499,7 @@ pgss_store(uint64 queryid, uint64 planid = plan_info ? 
plan_info->planid: 0; uint64 appid = djb2_hash((unsigned char *)application_name, application_name_len); char comments[512] = ""; + bool out_of_memory = false; /* Monitoring is disabled */ if (!PGSM_ENABLED) return; @@ -1532,7 +1533,7 @@ pgss_store(uint64 queryid, pgssQueryEntry *query_entry; query_entry = pgss_store_query_info(bucketid, queryid, dbid, userid, ip, appid, query, strlen(query), kind); if (query_entry == NULL) - elog(DEBUG1, "pg_stat_monitor: out of memory"); + out_of_memory = true; break; } case PGSS_ERROR: @@ -1543,13 +1544,13 @@ pgss_store(uint64 queryid, query_entry = pgss_store_query_info(bucketid, queryid, dbid, userid, ip, appid, query, strlen(query), kind); if (query_entry == NULL) { - elog(DEBUG1, "pg_stat_monitor: out of memory"); + out_of_memory = true; break; } entry = pgss_get_entry(bucketid, userid, dbid, queryid, ip, planid, appid); if (entry == NULL) { - elog(DEBUG1, "pg_stat_monitor: out of memory"); + out_of_memory = true; break; } @@ -1576,6 +1577,8 @@ pgss_store(uint64 queryid, break; } LWLockRelease(pgss->lock); + if (out_of_memory) + elog(DEBUG1, "pg_stat_monitor: out of memory"); } /* * Reset all statement statistics. From d633126a2d713791c69ff1c0b70a383b4522a6e0 Mon Sep 17 00:00:00 2001 From: Hamid Akhtar Date: Mon, 6 Sep 2021 15:21:55 +0500 Subject: [PATCH 16/45] Bumping version for upcoming 0.9.2-beta1 release --- META.json | 4 ++-- pg_stat_monitor.c | 2 +- pg_stat_monitor.control | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/META.json b/META.json index fcb66e0..9b04cd7 100644 --- a/META.json +++ b/META.json @@ -2,7 +2,7 @@ "name": "pg_stat_monitor", "abstract": "PostgreSQL Query Performance Monitoring Tool", "description": "The pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL's contrib module pg_stat_statements. PostgreSQL’s pg_stat_statements provides the basic statistics, which is sometimes not enough. 
The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user needs to calculate the aggregate which is quite expensive.", - "version": "0.7.1", + "version": "0.9.2-beta1", "maintainer": [ "ibrar.ahmed@percona.com" ], @@ -12,7 +12,7 @@ "abstract": "PostgreSQL Query Performance Monitoring Tool", "file": "pg_stat_monitor--1.0.sql", "docfile": "README.md", - "version": "0.7.1" + "version": "0.9.2-beta1" } }, "prereqs": { diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index c9a8fe4..90a07ff 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -24,7 +24,7 @@ PG_MODULE_MAGIC; -#define BUILD_VERSION "devel" +#define BUILD_VERSION "0.9.2-beta1" #define PG_STAT_STATEMENTS_COLS 52 /* maximum of above */ #define PGSM_TEXT_FILE "/tmp/pg_stat_monitor_query" diff --git a/pg_stat_monitor.control b/pg_stat_monitor.control index cc93476..2b826e9 100644 --- a/pg_stat_monitor.control +++ b/pg_stat_monitor.control @@ -1,5 +1,5 @@ # pg_stat_monitor extension -comment = 'track execution statistics of all SQL statements executed' +comment = 'The pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL contrib module pg_stat_statements. pg_stat_monitor provides aggregated statistics, client information, plan details including plan, and histogram information.' 
default_version = '1.0' module_pathname = '$libdir/pg_stat_monitor' relocatable = true From 0ceef74071752c8415c1bcac8c0303b5ee8614b4 Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Mon, 6 Sep 2021 15:40:53 +0500 Subject: [PATCH 17/45] Bumping version for upcoming 0.9.2-beta1 release --- regression/expected/version.out | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/regression/expected/version.out b/regression/expected/version.out index b9920ea..7c4640e 100644 --- a/regression/expected/version.out +++ b/regression/expected/version.out @@ -2,7 +2,7 @@ CREATE EXTENSION pg_stat_monitor; SELECT pg_stat_monitor_version(); pg_stat_monitor_version ------------------------- - devel + 0.9.2-beta1 (1 row) DROP EXTENSION pg_stat_monitor; From 40e6dff0f9c6f1920e1afe81e0f1991828001695 Mon Sep 17 00:00:00 2001 From: Lenz Grimmer Date: Thu, 9 Sep 2021 11:05:31 +0200 Subject: [PATCH 18/45] Documentation: Minor textual/formatting improvements Updated `README.md` and `USER_GUIDE.md`, improved wording and formatting. Slightly updated description in `META.json`. Signed-off-by: Lenz Grimmer --- META.json | 2 +- README.md | 20 ++++++++++---------- docs/USER_GUIDE.md | 18 +++++++++--------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/META.json b/META.json index 9b04cd7..0397750 100644 --- a/META.json +++ b/META.json @@ -1,7 +1,7 @@ { "name": "pg_stat_monitor", "abstract": "PostgreSQL Query Performance Monitoring Tool", - "description": "The pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL's contrib module pg_stat_statements. PostgreSQL’s pg_stat_statements provides the basic statistics, which is sometimes not enough. The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. 
In this case, a user needs to calculate the aggregate which is quite expensive.", + "description": "pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL's contrib module pg_stat_statements. PostgreSQL’s pg_stat_statements provides the basic statistics, which is sometimes not enough. The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user would need to calculate the aggregates, which is quite an expensive operation.", "version": "0.9.2-beta1", "maintainer": [ "ibrar.ahmed@percona.com" diff --git a/README.md b/README.md index 263ea19..cac1ab0 100644 --- a/README.md +++ b/README.md @@ -7,11 +7,11 @@ ## What is pg_stat_monitor? -The **pg_stat_monitor** is a **Query Performance Monitoring** tool for [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL. **pg_stat_monitor** is based on PostgreSQL's contrib module ``pg_stat_statements``. pg_stat_statements provides the basic statistics, which is sometimes not enough. The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user needs to calculate the aggregate which is quite expensive. +**pg_stat_monitor** is a **Query Performance Monitoring** tool for [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL. **pg_stat_monitor** is based on PostgreSQL's contrib module ``pg_stat_statements``. ``pg_stat_statements`` provides the basic statistics, which is sometimes not enough. The major shortcoming in ``pg_stat_statements`` is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. 
In this case, a user would need to calculate the aggregates, which is quite an expensive operation. -**pg_stat_monitor** is developed on the basis of pg_stat_statements as its more advanced replacement. It provides all the features of pg_stat_statements plus its own feature set. +**pg_stat_monitor** is developed on the basis of pg_stat_statements and is a more advanced replacement. It provides all the features of pg_stat_statements plus its own feature set. -### How pg_stat_monitor works? +### How does pg_stat_monitor work? ``pg_stat_monitor`` accumulates the information in the form of buckets. All the aggregated information is bucket based. The size of a bucket and the number of buckets should be configured using GUC (Grand Unified Configuration). When a bucket time elapses, ``pg_stat_monitor`` resets all the statistics and switches to the next bucket. After the last bucket elapses, ``pg_stat_monitor`` goes back to the first bucket. All the data on the first bucket is cleared out with new writes; therefore, to not lose the data, users must read the buckets before that. @@ -26,7 +26,7 @@ The **pg_stat_monitor** is a **Query Performance Monitoring** tool for [Percona 9. 
[Copyright Notice](#copyright-notice) ## Supported PostgreSQL Versions -The ``pg_stat_monitor`` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL but is only tested with these versions: +``pg_stat_monitor`` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL and is currently tested against the following versions: | Distribution | Version | Supported | | ------------------------------------|---------|--------------------| @@ -45,7 +45,7 @@ You can install ``pg_stat_monitor`` from [Percona repositories](#installing-from ### Installing from Percona repositories -``pg_stat_monitor`` is supplied as part of Percona Distribution for PostgreSQL. The rpm/deb packages are available from Percona repositories. To install ``pg_stat_monitor``, follow [the installation instructions](https://www.percona.com/doc/postgresql/LATEST/installing.html). +``pg_stat_monitor`` is supplied as part of Percona Distribution for PostgreSQL. The RPM/DEB packages are available from Percona's repositories. To install ``pg_stat_monitor``, follow [the installation instructions](https://www.percona.com/doc/postgresql/LATEST/installing.html). ### Installing from PGXN @@ -73,9 +73,9 @@ make USE_PGXS=1 install ``` ## Setup -``pg_stat_monitor`` cannot be enabled in your running ``postgresql`` instance. ``pg_stat_monitor`` needs to be loaded at the start time. This requires adding the ``pg_stat_monitor`` extension for the ``shared_preload_libraries`` parameter and restarting the ``postgresql`` instance. +``pg_stat_monitor`` cannot be enabled in your running ``postgresql`` instance, it needs to be loaded at the start time. This requires adding the ``pg_stat_monitor`` extension to the ``shared_preload_libraries`` parameter and restarting the ``postgresql`` instance. 
-You can set the ``pg_stat_monitor`` extension in the ``postgresql.conf`` file. +You can set the ``pg_stat_monitor`` extension in the ``postgresql.conf`` file. ``` # - Shared Library Preloading - @@ -134,11 +134,11 @@ SELECT decode_error_level(elevel) AS elevel, sqlcode, query, message FROM pg_st ``` -To learn more about ``pg_stat_monitor`` configuration and usage, see [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). +To learn more about ``pg_stat_monitor`` configuration and usage, see the [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). -## Submitting Bug Reports +## Submitting Feature Requests or Bug Reports -If you found a bug in ``pg_stat_monitor``, please submit the report to the [Jira issue tracker](https://jira.percona.com/projects/PG/issues). +If you would like to request a feature or if you found a bug in ``pg_stat_monitor``, please submit the report to the [Jira issue tracker](https://jira.percona.com/projects/PG/issues). Start by searching the open tickets for a similar report. If you find that someone else has already reported your issue, then you can upvote that report to increase its visibility. diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index e4b5bac..c56593f 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -34,7 +34,7 @@ SELECT * FROM pg_stat_monitor_settings; ``` -Some configuration parameters require the server restart and should be set before the server startup. These must be set in the ``postgresql.conf`` file. Other parameters do not require server restart and can be set permanently either in the ``postgresql.conf`` or from the client (``psql``). +Some configuration parameters require a server restart and should be set before the server startup. These must be set in the ``postgresql.conf`` file. Other parameters do not require a server restart and can be set permanently either in the ``postgresql.conf`` or from the client (``psql``). 
The table below shows set up options for each configuration parameter and whether the server restart is required to apply the parameter's value: @@ -68,7 +68,7 @@ The table below shows set up options for each configuration parameter and whethe ### Usage -pg_stat_monitor extension contains a view called pg_stat_monitor, which contains all the monitoring information. Find the list of columns in pg_stat_monitor view in the following table. The table also shows whether a particular column is available in pg_stat_statements. +The ``pg_stat_monitor`` extension contains a view called ``pg_stat_monitor``, which contains all the monitoring information. Find the list of columns in ``pg_stat_monitor`` view in the following table. The table also shows whether a particular column is available in ``pg_stat_statements``. |      Column        |           Type           | pg_stat_monitor      | pg_stat_statements @@ -153,18 +153,18 @@ bucket | bucket_start_time | #### Query Information -**`userid`**: An ID of the user to whom that query belongs. pg_stat_monitor collects queries from all the users and uses the `userid` to segregate the queries based on different users. +**`userid`**: An ID of the user to whom that query belongs. ``pg_stat_monitor`` collects queries from all the users and uses the `userid` to segregate the queries based on different users. -**`dbid`**: The database ID of the query. pg_stat_monitor accumulates queries from all the databases; therefore, this column is used to identify the database. +**`dbid`**: The database ID of the query. ``pg_stat_monitor`` accumulates queries from all the databases; therefore, this column is used to identify the database. -**`queryid`**:  pg_stat_monitor generates a unique ID for each query (queryid). +**`queryid`**: ``pg_stat_monitor`` generates a unique ID for each query (queryid). **`query`**: The query column contains the actual text of the query. 
This parameter depends on the **`pg_stat_monitor.pgsm_normalized_query`** configuration parameters, in which format to show the query. **`calls`**: Number of calls of that particular query. -##### Example 1: Shows the usename, database name, unique queryid hash, query, and the total number of calls of that query. +##### Example: Shows the usename, database name, unique queryid hash, query, and the total number of calls of that query. ```sql postgres=# SELECT userid, datname, queryid, substr(query,0, 50) AS query, calls FROM pg_stat_monitor; userid | datname | queryid | query | calls @@ -186,7 +186,7 @@ postgres=# SELECT userid, datname, queryid, substr(query,0, 50) AS query, calls ``` -##### Example 4: Shows the connected application_name. +##### Example: Shows the connected application_name. ```sql SELECT application_name, query FROM pg_stat_monitor; @@ -295,7 +295,7 @@ postgres=# SELECT * FROM histogram(0, 'F44CD1B4B33A47AF') AS a(range TEXT, freq (10 rows) ``` -There are 10 timebase buckets of the time **`pg_stat_monitor.pgsm_respose_time_step`** in the field ``resp_calls``. The value in the field shows how many queries run in that period of time. +There are 10 timebase buckets of the time **`pg_stat_monitor.pgsm_respose_time_step`** in the field ``resp_calls``. The field's value shows how many queries run in that period of time. #### Object Information. @@ -337,7 +337,7 @@ View definition:     bar b; ``` -Now when we query the pg_stat_monitor, it will show the view name and also all the table names in the view. +Now when we query ``pg_stat_monitor``, it will show the view name and also all the table names in the view. 
```sql SELECT relations, query FROM pg_stat_monitor;       relations      |                                                query                                                 From 88492e5b9bf927903ca42ce7083fbda65b75fb74 Mon Sep 17 00:00:00 2001 From: Anastasia Alexadrova Date: Wed, 8 Sep 2021 18:15:34 +0300 Subject: [PATCH 19/45] PG-208 Doc Added Contibuting guide new file: CONTRIBUTING.md --- CONTRIBUTING.md | 119 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..78ec34d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,119 @@ +# Contributing guide + +Welcome to `pg_stat_monitor` - the Query Performance Monitoring tool for PostgreSQL! + +We're glad that you would like to become a Percona community member and participate in keeping open source open. + +You can contribute in one of the following ways: + +1. Reach us on our [Forums](https://forums.percona.com/) and Discord. +2. [Submit a bug report or a feature request](#submit-a-bug-report-or-a-feature-request) +3. [Submit a pull request (PR) with the code patch](#submit-a-pull-request) +4. [Contribute to documentation](#contributing-to-documentation) + +By contributing, you agree to the [Percona Community code of conduct](https://github.com/percona/community/blob/main/content/contribute/coc.md). + + +## Submit a bug report or a feature request + +All bug reports, enhancements and feature requests are tracked in [Jira issue tracker](https://jira.percona.com/projects/PG). If you would like to suggest a new feature / an improvement or you found a bug in `pg_stat_monitor`, please submit the report to the [PG project](https://jira.percona.com/projects/PG/issues). + +Start by searching the open tickets for a similar report. If you find that someone else has already reported your issue, then you can upvote that report to increase its visibility. 
+ +If there is no existing report, submit your report following these steps: + +1. Sign in to [Jira issue tracker](https://jira.percona.com/projects/PG/issues). You will need to create an account if you do not have one. +2. In the _Summary_, _Description_, _Steps To Reproduce_, _Affects Version_ fields describe the problem you have detected or an idea that you have for a new feature or improvement. +3. As a general rule of thumb, try to create bug reports that are: + + * Reproducible: describe the steps to reproduce the problem. + * Unique: check if there already exists a JIRA ticket to describe the problem. + * Scoped to a Single Bug: only report one bug in one JIRA ticket + +## Submit a pull request + +Though not mandatory, we encourage you to first check for a bug report among Jira issues and in the PR list: perhaps the bug has already been addressed. + +For feature requests and enhancements, we do ask you to create a Jira issue, describe your idea and discuss the design with us. This way we align your ideas with our vision for the product development. + +If the bug hasn’t been reported / addressed, or we’ve agreed on the enhancement implementation with you, do the following: + +1. [Fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) this repository +2. Clone this repository on your machine. +3. Create a separate branch for your changes. If you work on a Jira issue, please include the issue number in the branch name so it reads as `-my_branch`. This makes it easier to track your contribution. +4. Make your changes. Please follow the guidelines outlined in the [PostgreSQL Coding Standard](https://www.postgresql.org/docs/current/source.html) to improve code readability. +5. Test your changes locally. See the [Running tests ](#running-tests) section for more information +6. Update the documentation describing your changes. See the [Contributing to documentation](#contributing-to-documentation) section for details +8. Commit the changes. 
Add the Jira issue number at the beginning of your message subject, so that is reads as ` : My commit message`. Follow this pattern for your commits: + + ``` + PG-1234: Main commit message. + + Details of fix. + ``` + + The [commit message guidelines](https://gist.github.com/robertpainsi/b632364184e70900af4ab688decf6f53) will help you with writing great commit messages + +9. Open a pull request to Percona +10. Our team will review your code and if everything is correct, will merge it. Otherwise, we will contact you for additional information or with the request to make changes. +11. Make sure your pull request contains only one commit message + +### Building pg_stat_monitor + +To build `pg_stat_monitor` from source code, you require the following: + +* git +* make + +Refer to the [Building from source code](https://github.com/percona/pg_stat_monitor#installing-from-source-code) section for guidelines. + + +### Running tests + +When you work, you should periodically run tests to check that your changes don’t break existing code. + +You can find the tests in the `regression` directory. + +#### Run manually + +1. Change directory to pg_stat_monitor + +**NOTE**: Make sure `postgres` user is the owner of the `pg_stat_monitor` directory + +2. Start the tests + 1. If you built PostgreSQL from PGDG, use the following command: + + ```sh + make installcheck + ``` + + + 2. If you installed PostgreSQL server from Percona Distribution for PostgreSQL, use the following command: + + ```sh + sudo su postgres bash -c 'make installcheck USE_PGXS=1' + ``` +#### Run automatically + +The tests are run automatically with GitHub actions once you commit and push your changes. Make sure all tests are successfully passed before you proceed. + + +## Contributing to documentation + +`pg_stat_monitor` documentation is written in Markdown language, so you can +[edit it online via GitHub](#edit-documentation-online-vi-github). Alternatively, you can include doc changes in your patch. 
The doc files are in the `docs` directory. + +### Edit documentation online via GitHub + +1. Click the **Edit this page** link on the sidebar. The source `.md` file of the page opens in GitHub editor in your browser. If you haven’t worked with the repository before, GitHub creates a [fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) of it for you. +2. Edit the page. You can check your changes on the **Preview** tab. +3. Commit your changes. + * In the _Commit changes_ section, describe your changes. + * Select the **Create a new branch for this commit** and start a pull request option + * Click **Propose changes**. +4. GitHub creates a branch and a commit for your changes. It loads a new page on which you can open a pull request to Percona. The page shows the base branch - the one you offer your changes for, your commit message and a diff - a visual representation of your changes against the original page. This allows you to make a last-minute review. When you are ready, click the Create pull request button. +5. Someone from our team reviews the pull request and if everything is correct, merges it into the documentation. Then it gets published on the site. + +## After your pull request is merged + +Once your pull request is merged, you are an official Percona Community Contributor. Welcome to the community! 
From 4a2a042955f8aa5ad9a8d40d25a32f14f43019c0 Mon Sep 17 00:00:00 2001 From: Mikhail Samoylov Date: Sat, 11 Sep 2021 10:12:01 +0300 Subject: [PATCH 20/45] Test pg_stat_monitor with pg from packages --- .github/workflows/pg11packagetest.yml | 60 +++++++++++++++++++++++++++ .github/workflows/pg12packagetest.yml | 60 +++++++++++++++++++++++++++ .github/workflows/pg13packagetest.yml | 60 +++++++++++++++++++++++++++ README.md | 3 ++ 4 files changed, 183 insertions(+) create mode 100644 .github/workflows/pg11packagetest.yml create mode 100644 .github/workflows/pg12packagetest.yml create mode 100644 .github/workflows/pg13packagetest.yml diff --git a/.github/workflows/pg11packagetest.yml b/.github/workflows/pg11packagetest.yml new file mode 100644 index 0000000..71913bc --- /dev/null +++ b/.github/workflows/pg11packagetest.yml @@ -0,0 +1,60 @@ +name: pg11package-test +on: [push] + +jobs: + build: + name: pg11package-test + runs-on: ubuntu-20.04 + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl wget -y + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install postgresql 11 + run: | + sudo wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" >> /etc/apt/sources.list.d/pgdg.list' + sudo apt update + sudo apt -y install postgresql-11 postgresql-server-dev-11 + + - name: Build pg_stat_monitor 
+ run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Change sources owner to postgres + run: sudo chown -R postgres:postgres src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/11/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg12packagetest.yml b/.github/workflows/pg12packagetest.yml new file mode 100644 index 0000000..d8207a3 --- /dev/null +++ b/.github/workflows/pg12packagetest.yml @@ -0,0 +1,60 @@ +name: pg12package-test +on: [push] + +jobs: + build: + name: pg12package-test + runs-on: ubuntu-20.04 + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl wget -y + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install postgresql 12 + run: | + sudo wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" >> 
/etc/apt/sources.list.d/pgdg.list' + sudo apt update + sudo apt -y install postgresql-12 postgresql-server-dev-12 + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Change sources owner to postgres + run: sudo chown -R postgres:postgres src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/12/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg13packagetest.yml b/.github/workflows/pg13packagetest.yml new file mode 100644 index 0000000..7bc1176 --- /dev/null +++ b/.github/workflows/pg13packagetest.yml @@ -0,0 +1,60 @@ +name: pg13package-test +on: [push] + +jobs: + build: + name: pg13package-test + runs-on: ubuntu-20.04 + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl wget -y + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install postgresql 13 + run: | + sudo wget --quiet -O - 
https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" >> /etc/apt/sources.list.d/pgdg.list' + sudo apt update + sudo apt -y install postgresql-13 postgresql-server-dev-13 + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Change sources owner to postgres + run: sudo chown -R postgres:postgres src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/13/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/README.md b/README.md index 263ea19..c807622 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,9 @@ ![pg12-test](https://github.com/percona/pg_stat_monitor/workflows/pg12-test/badge.svg) ![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) ![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) +![pg11package-test](https://github.com/percona/pg_stat_monitor/workflows/pg11package-test/badge.svg) +![pg12package-test](https://github.com/percona/pg_stat_monitor/workflows/pg12package-test/badge.svg) +![pg13package-test](https://github.com/percona/pg_stat_monitor/workflows/pg13package-test/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/percona/pg_stat_monitor/badge.svg)](https://coveralls.io/github/percona/pg_stat_monitor) From aecfb7a5cd03a8b63a3215ea35946fb2cf10ec2d Mon Sep 17 00:00:00 2001 From: Andrew Pogrebnoy Date: 
Tue, 21 Sep 2021 10:37:10 +0300 Subject: [PATCH 21/45] PG-232: remove unused function and fix a couple of typos --- hash_query.c | 10 ++-------- pg_stat_monitor.h | 2 -- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/hash_query.c b/hash_query.c index b74cada..4d6c812 100644 --- a/hash_query.c +++ b/hash_query.c @@ -97,12 +97,6 @@ pgsm_get_hash(void) return pgss_hash; } -HTAB* -pgsm_get_query_hash(void) -{ - return pgss_query_hash; -} - /* * shmem_shutdown hook: Dump statistics into file. * @@ -270,7 +264,7 @@ hash_entry_reset() LWLockRelease(pgss->lock); } -/* Caller must accuire lock */ +/* Caller must acquire a lock */ pgssQueryEntry* hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid) { @@ -289,7 +283,7 @@ hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 us return entry; } -/* Caller must accuire lock */ +/* Caller must acquire a lock */ pgssQueryEntry* hash_find_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid) { diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 62c33b9..15ed28d 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -374,10 +374,8 @@ void pgss_shmem_shutdown(int code, Datum arg); int pgsm_get_bucket_size(void); pgssSharedState* pgsm_get_ss(void); HTAB *pgsm_get_plan_hash(void); -HTAB *pgsm_get_query_hash(void); HTAB *pgsm_get_hash(void); HTAB *pgsm_get_plan_hash(void); -HTAB* pgsm_get_query_hash(void); void hash_entry_reset(void); void hash_query_entryies_reset(void); void hash_query_entries(); From b4d4dae29fd387d501ff5d6c90ebb012f3cddae0 Mon Sep 17 00:00:00 2001 From: Andrew Pogrebnoy Date: Wed, 22 Sep 2021 17:10:52 +0300 Subject: [PATCH 22/45] PG-232: revome unsed declrations --- pg_stat_monitor.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 90a07ff..418ba78 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -94,8 +94,6 @@ 
static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook = NULL; PG_FUNCTION_INFO_V1(pg_stat_monitor_version); PG_FUNCTION_INFO_V1(pg_stat_monitor_reset); -PG_FUNCTION_INFO_V1(pg_stat_monitor_1_2); -PG_FUNCTION_INFO_V1(pg_stat_monitor_1_3); PG_FUNCTION_INFO_V1(pg_stat_monitor); PG_FUNCTION_INFO_V1(pg_stat_monitor_settings); PG_FUNCTION_INFO_V1(get_histogram_timings); From f7588532be85a2f6e0c14bae746adeeca46316d4 Mon Sep 17 00:00:00 2001 From: Naeem Akhter Date: Thu, 23 Sep 2021 03:10:06 +0500 Subject: [PATCH 23/45] PG-226: Enable installcheck-world regression. 1) Enabled configure and build with proper flags and environment to make sure that built server is aligned with pg and ppg package distribution in terms of features and configurations. Earlier build (configure) was not aligned with pg community standard configuration that are used for community builds and distribution. 2) Enabled installcheck-world regression test suites of the pg server, to verify the stability and compatibility of pg server after loading pg_stat_monitor in server. 3) Change in expected files of error.out and error_1.out for error testacase. 4) Disbaled and removed coverage using coveralls.io, as it was not serving the purpose. (Note: installcheck-world was failing on pg-14 due to some changes that are done in pg14 or upstream PGSS, and that is causing additional line (Query Identifier) is output of some of the test cases of installcheck-world so it is not enabled in this commit. Problem with pg14 server installcheck regression is output of an extra line (Query identifier ****) in some of the test cases after loading extension PGSM and that causes regression to fail for server after library load.) 
--- .github/workflows/coverage_test.yml | 68 ------------------------ .github/workflows/pg11test.yml | 81 +++++++++++++++++++++-------- .github/workflows/pg12test.yml | 75 ++++++++++++++++++-------- .github/workflows/pg13test.yml | 80 ++++++++++++++++++++-------- .github/workflows/pg14test.yml | 66 +++++++++++++++-------- README.md | 2 - regression/expected/error.out | 6 +-- regression/expected/error_1.out | 6 +-- 8 files changed, 220 insertions(+), 164 deletions(-) delete mode 100644 .github/workflows/coverage_test.yml diff --git a/.github/workflows/coverage_test.yml b/.github/workflows/coverage_test.yml deleted file mode 100644 index 55304c4..0000000 --- a/.github/workflows/coverage_test.yml +++ /dev/null @@ -1,68 +0,0 @@ -name: coverage-test -on: ["push", "pull_request"] - -jobs: - build: - name: coverage-test - runs-on: ubuntu-latest - steps: - - name: Clone postgres repository - uses: actions/checkout@v2 - with: - repository: 'postgres/postgres' - ref: 'REL_13_STABLE' - - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - ref: 'master' - - - name: Install dependencies - run: | - sudo apt-get update - sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl git lcov -y - sudo rm -rf /var/lib/postgresql/ - sudo rm -rf /var/log/postgresql/ - sudo rm -rf /etc/postgresql/ - sudo rm -rf /usr/lib/postgresql - sudo rm -rf /usr/include/postgresql - sudo rm -rf /usr/share/postgresql - sudo rm -rf /etc/postgresql - sudo rm -f /usr/bin/pg_config - - name: Create pgsql dir - run: mkdir -p /opt/pgsql - - - name: Build postgres - run: | - export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-coverage --enable-tap-tests --prefix=/opt/pgsql - make - make install - - name: Start postgresql cluster - run: | - export PATH="/opt/pgsql/bin:$PATH" - 
/opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start - - name: Build pg_stat_monitor - run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin - make USE_PGXS=1 - make USE_PGXS=1 install - working-directory: src/pg_stat_monitor/ - - - name: Start pg_stat_monitor_tests & Run code coverage - run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start - make installcheck - make coverage-html - lcov --capture --directory . --output-file coverage/lcov.info - pip install cpp-coveralls - export COVERALLS_REPO_TOKEN="${{ secrets.COVERALL_PG_STAT_MONITOR_TOKEN }}" - coveralls --verbose - working-directory: src/pg_stat_monitor/ - diff --git a/.github/workflows/pg11test.yml b/.github/workflows/pg11test.yml index 57ea811..a1f7e53 100644 --- a/.github/workflows/pg11test.yml +++ b/.github/workflows/pg11test.yml @@ -1,10 +1,10 @@ -name: pg11-test +name: Test-with-pg11-build on: [push] jobs: build: name: pg11-test - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 steps: - name: Clone postgres repository uses: actions/checkout@v2 @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_11_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev 
libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -37,34 +34,76 @@ jobs: - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=/usr/include' \ + '--mandir=/usr/share/man' '--infodir=/usr/share/info' '--sysconfdir=/etc' \ + '--localstatedir=/var' '--disable-silent-rules' '--libdir=/usr/lib/x86_64-linux-gnu' \ + 'runstatedir=/run' '--disable-maintainer-mode' '--disable-dependency-tracking' \ + '--with-icu' '--with-tcl' '--with-perl' '--with-python' '--with-pam' '--with-openssl' \ + '--with-libxml' '--with-libxslt' 'PYTHON=/usr/bin/python3' \ + '--mandir=/usr/share/postgresql/11/man' '--docdir=/usr/share/doc/postgresql-doc-11' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/11' '--bindir=/usr/lib/postgresql/11/bin' \ + '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 11.x.pgdg20.04+1)' \ + '--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-9' 'CLANG=/usr/bin/clang-9' '--with-systemd' \ + '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' 'TAR=/bin/tar' \ + 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + '--with-includes=/usr/include/mit-krb5' '--with-libs=/usr/lib/mit-krb5' 
\ + '--with-libs=/usr/lib/x86_64-linux-gnu/mit-krb5' 'build_alias=x86_64-linux-gnu' \ + 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/11/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/11/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/11/bin:$PATH" + sudo cp /usr/lib/postgresql/11/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/11/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/11/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ + + - name: Start Server installcheck-world tests (without TAP) + run: | + make installcheck-world + + - name: Report on installcheck-world test suites fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions output files of failed testsuite, and postgresql log + path: | + **/regression.diffs + **/regression.out + src/pg_stat_monitor/logfile + retention-days: 1 - name: Start pg_stat_monitor_tests run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make 
installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git a/.github/workflows/pg12test.yml b/.github/workflows/pg12test.yml index 1eee4c2..919614b 100644 --- a/.github/workflows/pg12test.yml +++ b/.github/workflows/pg12test.yml @@ -1,4 +1,4 @@ -name: pg12-test +name: Test-with-pg12-build on: [push] jobs: @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_12_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -30,41 +27,77 @@ jobs: sudo rm -rf /usr/share/postgresql sudo rm -rf /etc/postgresql sudo rm -f /usr/bin/pg_config - - name: Create pgsql dir run: mkdir -p /opt/pgsql - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=/usr/include' '--mandir=/usr/share/man' \ + '--infodir=/usr/share/info' '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + '--libdir=/usr/lib/x86_64-linux-gnu' 
'runstatedir=/run' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' '--with-python' \ + '--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' 'PYTHON=/usr/bin/python3' \ + '--mandir=/usr/share/postgresql/12/man' '--docdir=/usr/share/doc/postgresql-doc-12' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' '--datadir=/usr/share/postgresql/12' \ + '--bindir=/usr/lib/postgresql/12/bin' '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 12.x.pgdg20.04+1)' '--enable-nls' \ + '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' '--enable-dtrace' '--disable-rpath' \ + '--with-uuid=e2fs' '--with-gnu-ld' '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-9' 'CLANG=/usr/bin/clang-9' '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' \ + 'PROVE=/usr/bin/prove' 'TAR=/bin/tar' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + '--with-includes=/usr/include/mit-krb5' '--with-libs=/usr/lib/mit-krb5' \ + '--with-libs=/usr/lib/x86_64-linux-gnu/mit-krb5' 'build_alias=x86_64-linux-gnu' \ + 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/12/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/12/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor 
run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/12/bin:$PATH" + sudo cp /usr/lib/postgresql/12/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/12/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/12/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ + + - name: Start Server installcheck-world tests (without TAP) + run: | + make installcheck-world + - name: Report on installcheck-world test suites fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions output files of failed testsuite, and postgresql log + path: | + **/regression.diffs + **/regression.out + src/pg_stat_monitor/logfile + retention-days: 1 - name: Start pg_stat_monitor_tests run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git a/.github/workflows/pg13test.yml b/.github/workflows/pg13test.yml index dd746fa..6a85cd5 100644 --- a/.github/workflows/pg13test.yml +++ b/.github/workflows/pg13test.yml @@ -1,4 +1,4 @@ -name: pg13-test +name: Test-with-pg13-build on: [push] jobs: @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_13_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get update sudo 
apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -30,41 +27,82 @@ jobs: sudo rm -rf /usr/share/postgresql sudo rm -rf /etc/postgresql sudo rm -f /usr/bin/pg_config - - name: Create pgsql dir run: mkdir -p /opt/pgsql - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' \ + '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' \ + '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + '--libdir=${prefix}/lib/x86_64-linux-gnu' \ + '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' \ + '--with-python' '--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' \ + 'PYTHON=/usr/bin/python3' '--mandir=/usr/share/postgresql/13/man' \ + '--docdir=/usr/share/doc/postgresql-doc-13' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/13' '--bindir=/usr/lib/postgresql/13/bin' \ + '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 2:13-x.focal)' \ + 
'--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-11' 'CLANG=/usr/bin/clang-11' \ + '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' \ + 'TAR=/bin/tar' 'XSLTPROC=xsltproc --nonet' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + 'build_alias=x86_64-linux-gnu' 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' \ + 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/13/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/13/bin:$PATH" + sudo cp /usr/lib/postgresql/13/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ + + - name: Start Server installcheck-world tests (without TAP) + run: | + make 
installcheck-world + - name: Report on installcheck-world test suites fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions output files of failed testsuite, and postgresql log + path: | + **/regression.diffs + **/regression.out + src/pg_stat_monitor/logfile + retention-days: 1 - name: Start pg_stat_monitor_tests run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git a/.github/workflows/pg14test.yml b/.github/workflows/pg14test.yml index acfcdbf..0824bd4 100644 --- a/.github/workflows/pg14test.yml +++ b/.github/workflows/pg14test.yml @@ -1,4 +1,4 @@ -name: pg14-test +name: Test-with-pg14-build on: [push] jobs: @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_14_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf 
/etc/postgresql/ @@ -30,41 +27,68 @@ jobs: sudo rm -rf /usr/share/postgresql sudo rm -rf /etc/postgresql sudo rm -f /usr/bin/pg_config - - name: Create pgsql dir run: mkdir -p /opt/pgsql - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' \ + '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' \ + '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + '--libdir=${prefix}/lib/x86_64-linux-gnu' \ + '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' \ + '--with-python' '--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' \ + 'PYTHON=/usr/bin/python3' '--mandir=/usr/share/postgresql/14/man' \ + '--docdir=/usr/share/doc/postgresql-doc-14' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/14' '--bindir=/usr/lib/postgresql/14/bin' \ + '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 2:14-x.focal)' \ + '--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-11' 'CLANG=/usr/bin/clang-11' \ + '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' \ + 'TAR=/bin/tar' 'XSLTPROC=xsltproc --nonet' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + 'build_alias=x86_64-linux-gnu' 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' \ + 'CXXFLAGS=-g 
-O2 -fstack-protector-strong -Wformat -Werror=format-security' + make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/14/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/14/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/14/bin:$PATH" + sudo cp /usr/lib/postgresql/14/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/14/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/14/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ - name: Start pg_stat_monitor_tests run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git a/README.md b/README.md index cac1ab0..5ce62a4 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,6 @@ ![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) ![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) -[![Coverage 
Status](https://coveralls.io/repos/github/percona/pg_stat_monitor/badge.svg)](https://coveralls.io/github/percona/pg_stat_monitor) - ## What is pg_stat_monitor? **pg_stat_monitor** is a **Query Performance Monitoring** tool for [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL. **pg_stat_monitor** is based on PostgreSQL's contrib module ``pg_stat_statements``. ``pg_stat_statements`` provides the basic statistics, which is sometimes not enough. The major shortcoming in ``pg_stat_statements`` is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user would need to calculate the aggregates, which is quite an expensive operation. diff --git a/regression/expected/error.out b/regression/expected/error.out index b901e14..ccf7968 100644 --- a/regression/expected/error.out +++ b/regression/expected/error.out @@ -28,15 +28,11 @@ SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLA SELECT 1/0; | 20 | 22012 | division by zero SELECT pg_stat_monitor_reset(); | 0 | | SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | - do $$ +| 0 | | - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | do $$ +| 19 | 01000 | warning message BEGIN +| | | RAISE WARNING 'warning message'; +| | | END $$; | | | -(7 rows) +(6 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset diff --git a/regression/expected/error_1.out b/regression/expected/error_1.out index 8a8dd8b..102a92a 100644 --- a/regression/expected/error_1.out +++ b/regression/expected/error_1.out @@ -28,15 +28,11 @@ SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLA SELECT 1/0; | 21 | 22012 | division by zero SELECT pg_stat_monitor_reset(); | 0 | | SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | - do 
$$ +| 0 | | - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | do $$ +| 19 | 01000 | warning message BEGIN +| | | RAISE WARNING 'warning message'; +| | | END $$; | | | -(7 rows) +(6 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset From 0403ca951cdbd4c0e54edc8c3cf2b6f54b149c11 Mon Sep 17 00:00:00 2001 From: Hamid Akhtar Date: Mon, 27 Sep 2021 13:10:39 +0500 Subject: [PATCH 24/45] Create code-of-conduct.md --- code-of-conduct.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 code-of-conduct.md diff --git a/code-of-conduct.md b/code-of-conduct.md new file mode 100644 index 0000000..6af3e44 --- /dev/null +++ b/code-of-conduct.md @@ -0,0 +1,5 @@ +# Percona Distribution for PostgreSQL Operator Code of Conduct + +All Percona Products follow the [Percona Community Code of Conduct](https://github.com/percona/community/blob/main/content/contribute/coc.md). + +If you notice any unacceptable behavior, let us know as soon as possible by writing to . We will respond within 48 hours. From a9bd80532293c52ae2e1dcfe2e3345e60bdeabcb Mon Sep 17 00:00:00 2001 From: Naeem Akhter Date: Mon, 27 Sep 2021 15:24:17 +0500 Subject: [PATCH 25/45] PG-227: Enable PGSM master branch regression/test with latest PG releases. Enable PGSM master branch regression/test with last Postgres release (actual released packages) for supported server versions. This will make sure that we would have already tested the PGSM compatibility well before PPG releases are done. 
--- .github/workflows/pg11test-pgdg-packages.yml | 61 ++++++++++++++++++++ .github/workflows/pg12test-pgdg-packages.yml | 61 ++++++++++++++++++++ .github/workflows/pg13test-pgdg-packages.yml | 57 ++++++++++++++++++ .github/workflows/pg14test-pgdg-packages.yml | 61 ++++++++++++++++++++ 4 files changed, 240 insertions(+) create mode 100644 .github/workflows/pg11test-pgdg-packages.yml create mode 100644 .github/workflows/pg12test-pgdg-packages.yml create mode 100644 .github/workflows/pg13test-pgdg-packages.yml create mode 100644 .github/workflows/pg14test-pgdg-packages.yml diff --git a/.github/workflows/pg11test-pgdg-packages.yml b/.github/workflows/pg11test-pgdg-packages.yml new file mode 100644 index 0000000..9126f7c --- /dev/null +++ b/.github/workflows/pg11test-pgdg-packages.yml @@ -0,0 +1,61 @@ +name: Test-with-pg11-pgdg-packages +on: [push] + +jobs: + build: + name: pg11-test-with-pgdg-packages + runs-on: ubuntu-18.04 + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 11 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-11 + sudo apt-get update + sudo apt-get -y install postgresql-client-11 + sudo apt-get update + sudo apt install postgresql-server-dev-11 + sudo chown -R postgres:postgres src/ + + 
- name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/11/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg12test-pgdg-packages.yml b/.github/workflows/pg12test-pgdg-packages.yml new file mode 100644 index 0000000..2653ec6 --- /dev/null +++ b/.github/workflows/pg12test-pgdg-packages.yml @@ -0,0 +1,61 @@ +name: Test-with-pg12-pgdg-packages +on: [push] + +jobs: + build: + name: pg12-test-with-pgdg-packages + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 12 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-12 + sudo apt-get update + sudo apt-get -y install 
postgresql-client-12 + sudo apt-get update + sudo apt install postgresql-server-dev-12 + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/12/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg13test-pgdg-packages.yml b/.github/workflows/pg13test-pgdg-packages.yml new file mode 100644 index 0000000..49e0261 --- /dev/null +++ b/.github/workflows/pg13test-pgdg-packages.yml @@ -0,0 +1,57 @@ +name: Test-with-pg13-pgdg-packages +on: [push] + +jobs: + build: + name: pg13-test-with-pgdg-packages + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 13 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo 
apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-13 postgresql-client-13 postgresql-contrib postgresql-server-dev-13 + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/13/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg14test-pgdg-packages.yml b/.github/workflows/pg14test-pgdg-packages.yml new file mode 100644 index 0000000..352aac6 --- /dev/null +++ b/.github/workflows/pg14test-pgdg-packages.yml @@ -0,0 +1,61 @@ +name: Test-with-pg14-pgdg-packages +on: [push] + +jobs: + build: + name: pg14-test-with-pgdg-packages + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 14 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main 14" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - 
https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-14 + sudo apt-get update + sudo apt-get -y install postgresql-client-14 + sudo apt-get update + sudo apt-get -y install postgresql-server-dev-14 + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/14/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 From 273f23b161d2e231b5c8655132b9ff328cf6e1e8 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Tue, 28 Sep 2021 16:12:34 -0300 Subject: [PATCH 26/45] PG-230: Fix duplicated query entries. A problem during bucket management was allowing some queries to be duplicated, old entries would sit around without having their statistics updated. The problem would arise during the following chain of events: 1. A query goes through pgss_post_parse_analyze, in this stage (PGSS_PARSE) we only save the query into the query buffer and create an entry in the query hash table. 2. The query then goes through pgss_ExecutorStart (PGSS_EXEC), in this stage we create an entry for query statistic counters with default values, all time stats equal zero, etc. 3. The query then goes through pgss_ExecutorEnd (PGSS_FINISH), in this stage we update the query statistics, no. calls, total time taken, min_time, etc. 
The problem is that between steps 2 and 3, the current bucket ID timer may have been expired. For example, during steps 1 and 2 the query may have been stored in bucket ID 1, but when the query is finished (pgss_ExecutorEnd) the current bucket ID may have been updated to 2. This is leaving an entry for the query in bucket ID 1 with state ACTIVE, with time statistics not updated yet. This is also creating an entry for the query in the bucket ID 2, with all statistics (time and others) being updated for this entry. To solve this problem, during transition to a new bucket id, we scan all pending queries in the previous bucket id and move them to the new bucket id. This way finished queries will always be associated with the bucket id that was active at the time they've finished. --- hash_query.c | 92 ++++++++++++++++++++++++++++++++++++++++++++--- pg_stat_monitor.c | 4 +-- pg_stat_monitor.h | 2 +- 3 files changed, 91 insertions(+), 7 deletions(-) diff --git a/hash_query.c b/hash_query.c index 4d6c812..6c01a52 100644 --- a/hash_query.c +++ b/hash_query.c @@ -15,9 +15,11 @@ *------------------------------------------------------------------------- */ #include "postgres.h" +#include "nodes/pg_list.h" #include "pg_stat_monitor.h" + static pgssSharedState *pgss; static HTAB *pgss_hash; static HTAB *pgss_query_hash; @@ -130,7 +132,7 @@ hash_memsize(void) } pgssEntry * -hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key,int encoding) +hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) { pgssEntry *entry = NULL; bool found = false; @@ -222,24 +224,106 @@ hash_query_entry_dealloc(int bucket, unsigned char *buf) /* * Deallocate least-used entries. * + * If old_bucket_id != -1, move all pending queries in old_bucket_id + * to the new bucket id. + * * Caller must hold an exclusive lock on pgss->lock. 
*/ bool -hash_entry_dealloc(int bucket) +hash_entry_dealloc(int new_bucket_id, int old_bucket_id) { HASH_SEQ_STATUS hash_seq; pgssEntry *entry = NULL; + List *pending_entries = NIL; + ListCell *pending_entry; + + /* + * During transition to a new bucket id, a rare but possible race + * condition may happen while reading pgss->current_wbucket. If a + * different thread/process updates pgss->current_wbucket before this + * function is called, it may happen that old_bucket_id == new_bucket_id. + * If that is the case, we adjust the old bucket id here instead of using + * a lock in order to avoid the overhead. + */ + if (old_bucket_id != -1 && old_bucket_id == new_bucket_id) + { + if (old_bucket_id == 0) + old_bucket_id = PGSM_MAX_BUCKETS - 1; + else + old_bucket_id--; + } hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { - if (bucket < 0 || - (entry->key.bucket_id == bucket && + if (new_bucket_id < 0 || + (entry->key.bucket_id == new_bucket_id && (entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR))) { entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } + + /* + * If we detect a pending query residing in the previous bucket id, + * we add it to a list of pending elements to be moved to the new + * bucket id. + * Can't update the hash table while iterating it inside this loop, + * as this may introduce all sort of problems. + */ + if (old_bucket_id != -1 && entry->key.bucket_id == old_bucket_id) + { + if (entry->counters.state == PGSS_PARSE || + entry->counters.state == PGSS_PLAN || + entry->counters.state == PGSS_EXEC) + { + pgssEntry *bkp_entry = malloc(sizeof(pgssEntry)); + if (!bkp_entry) + { + /* No memory, remove pending query entry from the previous bucket. */ + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + continue; + } + + /* Save key/data from the previous entry. */ + memcpy(bkp_entry, entry, sizeof(pgssEntry)); + + /* Update key to use the new bucket id. 
*/ + bkp_entry->key.bucket_id = new_bucket_id; + + /* Add the entry to a list of nodes to be processed later. */ + pending_entries = lappend(pending_entries, bkp_entry); + + /* Finally remove the pending query from the expired bucket id. */ + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + } + } } + + /* + * Iterate over the list of pending queries in order + * to add them back to the hash table with the updated bucket id. + */ + foreach (pending_entry, pending_entries) { + bool found = false; + pgssEntry *new_entry; + pgssEntry *old_entry = (pgssEntry *) lfirst(pending_entry); + + new_entry = (pgssEntry *) hash_search(pgss_hash, &old_entry->key, HASH_ENTER_NULL, &found); + if (new_entry == NULL) + elog(DEBUG1, "%s", "pg_stat_monitor: out of memory"); + else if (!found) + { + /* Restore counters and other data. */ + new_entry->counters = old_entry->counters; + SpinLockInit(&new_entry->mutex); + new_entry->encoding = old_entry->encoding; + } + + free(old_entry); + } + + list_free(pending_entries); + return true; } diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 418ba78..b319b79 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1591,7 +1591,7 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS) (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - hash_entry_dealloc(-1); + hash_entry_dealloc(-1, -1); hash_query_entryies_reset(); #ifdef BENCHMARK for (int i = STATS_START; i < STATS_END; ++i) { @@ -2007,7 +2007,7 @@ get_next_wbucket(pgssSharedState *pgss) bucket_id = (tv.tv_sec / PGSM_BUCKET_TIME) % PGSM_MAX_BUCKETS; LWLockAcquire(pgss->lock, LW_EXCLUSIVE); buf = pgss_qbuf[bucket_id]; - hash_entry_dealloc(bucket_id); + hash_entry_dealloc(bucket_id, pgss->current_wbucket); hash_query_entry_dealloc(bucket_id, buf); snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)bucket_id); diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h 
index 15ed28d..e9c4f62 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -380,7 +380,7 @@ void hash_entry_reset(void); void hash_query_entryies_reset(void); void hash_query_entries(); void hash_query_entry_dealloc(int bucket, unsigned char *buf); -bool hash_entry_dealloc(int bucket); +bool hash_entry_dealloc(int new_bucket_id, int old_bucket_id); pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding); Size hash_memsize(void); From d1e01aeaab39635d0021c3c8cc19e84b4611af8f Mon Sep 17 00:00:00 2001 From: Anastasia Alexadrova Date: Thu, 30 Sep 2021 15:37:18 +0300 Subject: [PATCH 27/45] PG-207 Readme updates modified: README.md --- README.md | 274 +++++++++++++++++++++++++++++++++++------------------- 1 file changed, 177 insertions(+), 97 deletions(-) diff --git a/README.md b/README.md index 5ce62a4..cae7861 100644 --- a/README.md +++ b/README.md @@ -3,110 +3,194 @@ ![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) ![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) -## What is pg_stat_monitor? +# pg_stat_monitor: Query Performance Tool for PostgreSQL -**pg_stat_monitor** is a **Query Performance Monitoring** tool for [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL. **pg_stat_monitor** is based on PostgreSQL's contrib module ``pg_stat_statements``. ``pg_stat_statements`` provides the basic statistics, which is sometimes not enough. The major shortcoming in ``pg_stat_statements`` is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user would need to calculate the aggregates, which is quite an expensive operation. +## Table of Contents -**pg_stat_monitor** is developed on the basis of pg_stat_statements as is a more advanced replacement. 
It provides all the features of pg_stat_statements plus its own feature set. +* [Overview](#overview) +* [pg_stat_monitor features](#pg_stat_monitor-features) +* [Documentation](#documentation) +* [Supported versions](#supported-versions) +* [Supported platforms](#supported-platforms) +* [Installation](#installation) +* [Configuration](#configuration) +* [Setup](#setup) +* [Building from source code](#building-from-source) +* [How to contribute](#how-to-contribute) +* [License](#license) +* [Copyright](#copyright) -### How does pg_stat_monitor work? +## Overview -``pg_stat_monitor`` accumulates the information in the form of buckets. All the aggregated information is bucket based. The size of a bucket and the number of buckets should be configured using GUC (Grand Unified Configuration). When a bucket time elapses, ``pg_stat_monitor`` resets all the statistics and switches to the next bucket. After the last bucket elapses, ``pg_stat_monitor`` goes back to the first bucket. All the data on the first bucket is cleared out with new writes; therefore, to not lose the data, users must read the buckets before that. +**NOTE**: This is a beta release and is subject to further changes. We recommend using it in testing environments only. -## Documentation -1. [Supported PostgreSQL Versions](#supported-postgresql-versions) -2. [Installation](#installation) -3. [Setup](#setup) -4. [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md) -6. [Release Notes](https://github.com/percona/pg_stat_monitor/blob/master/docs/RELEASE_NOTES.md) -7. [License](https://github.com/percona/pg_stat_monitor/blob/master/LICENSE) -8. [Submitting Bug Reports](#submitting-bug-reports) -9. [Copyright Notice](#copyright-notice) +The `pg_stat_monitor` is a **_Query Performance Monitoring_** tool for PostgreSQL. It attempts to provide a more holistic picture by providing much-needed query performance insights in a single view. 
-## Supported PostgreSQL Versions -``pg_stat_monitor`` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL and is currently tested against thes following versions: +`pg_stat_monitor` provides improved insights that allow database users to understand query origins, execution, planning statistics and details, query information, and metadata. This significantly improves observability, enabling users to debug and tune query performance. `pg_stat_monitor` is developed on the basis of `pg_stat_statements` as its more advanced replacement. -| Distribution | Version | Supported | -| ------------------------------------|---------|--------------------| -| PostgreSQL | < 11 | :x: | -| PostgreSQL | 11 | :heavy_check_mark: | -| PostgreSQL | 12 | :heavy_check_mark: | -| PostgreSQL | 13 | :heavy_check_mark: | -| Percona Distribution for PostgreSQL | < 11 | :x: | -| [Percona Distribution for PostgreSQL](https://www.percona.com/downloads/percona-postgresql-11/) | 11 | :heavy_check_mark: | -| [Percona Distribution for PostgreSQL](https://www.percona.com/downloads/percona-postgresql-12/) | 12 | :heavy_check_mark: | -| [Percona Distribution for PostgreSQL](https://www.percona.com/downloads/percona-postgresql-13/) | 13 | :heavy_check_mark: | +While `pg_stat_statements` provides ever-increasing metrics, `pg_stat_monitor` aggregates the collected data, saving user efforts for doing it themselves. `pg_stat_monitor` stores statistics in configurable time-based units – buckets. This allows focusing on statistics generated for shorter time periods and makes query timing information such as max/min/mean time more accurate. -## Installation +To learn about other features, available in `pg_stat_monitor`, see the [Features](#pg_stat_monitor-features) section and the [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). 
-You can install ``pg_stat_monitor`` from [Percona repositories](#installing-from-percona-repositories) and from [source code](#installing-from-source-code). +`pg_stat_monitor` supports PostgreSQL versions 11 and above. It is compatible with both PostgreSQL provided by PostgreSQL Global Development Group (PGDG) and [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution). -### Installing from Percona repositories +The RPM (for RHEL and CentOS) and the DEB (for Debian and Ubuntu) packages are available from Percona repositories for PostgreSQL versions [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/). -``pg_stat_monitor`` is supplied as part of Percona Distribution for PostgreSQL. The RPM/DEB packages are available from Percona's repositories. To install ``pg_stat_monitor``, follow [the installation instructions](https://www.percona.com/doc/postgresql/LATEST/installing.html). +The RPM packages are also available in the official PostgreSQL (PGDG) yum repositories. -### Installing from PGXN -You can install ``pg_stat_monitor`` from PGXN (PostgreSQL Extensions Network) using the [PGXN client](https://pgxn.github.io/pgxnclient/). +### pg_stat_monitor features +`pg_stat_monitor` simplifies query observability by providing a more holistic view of query from performance, application and analysis perspectives. This is achieved by grouping data in configurable time buckets that allow capturing of load and performance information for smaller time windows. So performance issues and patterns can be identified based on time and workload. + + +* **Time Interval Grouping:** Instead of supplying one set of ever-increasing counts, `pg_stat_monitor` computes stats for a configured number of time intervals - time buckets. 
This allows for much better data accuracy, especially in the case of high resolution or unreliable networks. +* **Multi-Dimensional Grouping:** While `pg_stat_statements` groups counters by userid, dbid, queryid, `pg_stat_monitor` uses a more detailed group for higher precision. This allows a user to drill down into the performance of queries. +* **Capture Actual Parameters in the Queries:** `pg_stat_monitor` allows you to choose if you want to see queries with placeholders for parameters or actual parameter data. This simplifies debugging and analysis processes by enabling users to execute the same query. +* **Query Plan:** Each SQL is now accompanied by its actual plan that was constructed for its execution. That’s a huge advantage if you want to understand why a particular query is slower than expected. +* **Tables Access Statistics for a Statement:** This allows us to easily identify all queries that accessed a given table. This set is at par with the information provided by the `pg_stat_statements`. +* **Histogram:** Visual representation is very helpful as it can help identify issues. With the help of the histogram function, one can now view a timing/calling data histogram in response to an SQL query. And yes, it even works in psql. + + +### Documentation + +1. [User guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md) +2. pg_stat_monitor vs pg_stat_statements +3. pg_stat_monitor view reference +4. [Release notes](https://github.com/percona/pg_stat_monitor/blob/master/docs/RELEASE_NOTES.md) +5. 
[Contributing guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) + + +### Supported versions + +The `pg_stat_monitor` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL, but is only tested with these versions: + +| **Distribution** | **Version** | **Provider** | +| ---------------- | --------------- | ------------ | +|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/) and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/)| Percona| +| PostgreSQL | 11, 12, and 13 | PostgreSQL Global Development Group (PGDG) | + + +### Supported platforms + +The PostgreSQL YUM repository supports `pg_stat_monitor` for all [supported versions](#supported-versions) for the following platforms: + +* Red Hat Enterprise/Rocky/CentOS/Oracle Linux 7 and 8 +* Fedora 33 and 34 + +Find the list of supported platforms for `pg_stat_monitor` within [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) on the [Percona Release Lifecycle Overview](https://www.percona.com/services/policies/percona-software-support-lifecycle#pgsql) page. + + +### Installation + +You can install `pg_stat_monitor` from the following sources: + +* [Percona repositories](#installing-from-percona-repositories), +* [PostgreSQL PGDG yum repositories](#installing-from-postgresql-yum-repositories), +* [PGXN](#installing-from-pgxn) and +* [source code](#building-from-source). + + +#### Installing from Percona repositories + +To install `pg_stat_monitor` from Percona repositories, you need to use the `percona-release` repository management tool. + +1. 
[Install percona-release](https://www.percona.com/doc/percona-repo-config/installing.html) following the instructions relevant to your operating system +2. Enable Percona repository: + +``` sh +percona-release setup ppgXX +``` + +Replace XX with the desired PostgreSQL version. For example, to install `pg_stat_monitor ` for PostgreSQL 13, specify `ppg13`. + +3. Install `pg_stat_monitor` package + * For Debian and Ubuntu: + ``` sh + apt-get install percona-pg-stat-monitor13 + ``` + * For RHEL and CentOS: + ``` sh + yum install percona-pg-stat-monitor13 + ``` + +#### Installing from PostgreSQL yum repositories + +Install the PostgreSQL repositories following the instructions in the [Linux downloads (Red Hat family)](https://www.postgresql.org/download/linux/redhat/) chapter in PostgreSQL documentation. + +Install `pg_stat_monitor`: + +``` +dnf install -y pg_stat_monitor_VERSION +``` + +Replace the `VERSION` variable with the PostgreSQL version you are using (e.g. specify `pg_stat_monitor_13` for PostgreSQL 13) + + +#### Installing from PGXN + +You can install `pg_stat_monitor` from PGXN (PostgreSQL Extensions Network) using the [PGXN client](https://pgxn.github.io/pgxnclient/). Use the following command: -```sh +``` pgxn install pg_stat_monitor ``` -### Installing from source code +### Configuration -You can download the source code of the latest release of ``pg_stat_monitor`` from [the releases page on GitHub](https://github.com/Percona/pg_stat_monitor/releases) or using git: -```sh -git clone git://github.com/Percona/pg_stat_monitor.git -``` +You can find the configuration parameters of the `pg_stat_monitor` extension in the `pg_stat_monitor_settings` view. To change the default configuration, specify new values for the desired parameters using the GUC (Grand Unified Configuration) system. To learn more, refer to the [Configuration](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md#configuration) section of the user guide. 
-Compile and install the extension -```sh -cd pg_stat_monitor -make USE_PGXS=1 -make USE_PGXS=1 install -``` -## Setup -``pg_stat_monitor`` cannot be enabled in your running ``postgresql`` instance, it needs to be loaded at the start time. This requires adding the ``pg_stat_monitor`` extension to the ``shared_preload_libraries`` parameter and restarting the ``postgresql`` instance. +### Setup -You can set the ``pg_stat_monitor`` extension in the ``postgresql.conf`` file. +You can enable `pg_stat_monitor` when your `postgresql` instance is not running. + +`pg_stat_monitor` needs to be loaded at the start time. The extension requires additional shared memory; therefore, add the `pg_stat_monitor` value for the `shared_preload_libraries` parameter and restart the `postgresql` instance. + +Use the [ALTER SYSTEM](https://www.postgresql.org/docs/current/sql-altersystem.html)command from `psql` terminal to modify the `shared_preload_libraries` parameter. ``` -# - Shared Library Preloading - - -shared_preload_libraries = 'pg_stat_monitor' # (change requires restart) -#local_preload_libraries = '' -#session_preload_libraries = '' -``` - -Or you can set it from `psql` terminal using the ``ALTER SYSTEM`` command. - -```sql ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_monitor'; + ALTER SYSTEM ``` -```sh +**NOTE**: If you’ve added other values to the shared_preload_libraries parameter, list all of them separated by commas for the `ALTER SYSTEM` command. For example, `ALTER SYSTEM SET shared_preload_libraries = 'foo, bar, pg_stat_monitor'` + +Start or restart the `postgresql` instance to apply the changes. + +* On Debian and Ubuntu: + +``` +sudo systemctl restart postgresql.service +``` + +* On Red Hat Enterprise Linux and CentOS: + + +``` sudo systemctl restart postgresql-13 ``` +Create the extension using the [CREATE EXTENSION](https://www.postgresql.org/docs/current/sql-createextension.html) command. 
Using this command requires the privileges of a superuser or a database owner. Connect to `psql` as a superuser for a database and run the following command: -Create the extension using the ``CREATE EXTENSION`` command. -```sql + +``` CREATE EXTENSION pg_stat_monitor; CREATE EXTENSION ``` -```sql + +This allows you to see the stats collected by `pg_stat_monitor`. + + +``` -- Select some of the query information, like client_ip, username and application_name etc. -postgres=# SELECT application_name, userid AS user_name, datname AS database_name, substr(query,0, 50) AS query, calls, client_ip +postgres=# SELECT application_name, userid AS user_name, datname AS database_name, substr(query,0, 50) AS query, calls, client_ip FROM pg_stat_monitor; - application_name | user_name | database_name | query | calls | client_ip + application_name | user_name | database_name | query | calls | client_ip ------------------+-----------+---------------+---------------------------------------------------+-------+----------- psql | vagrant | postgres | SELECT application_name, userid::regrole AS user_ | 1 | 127.0.0.1 psql | vagrant | postgres | SELECT application_name, userid AS user_name, dat | 3 | 127.0.0.1 @@ -114,51 +198,47 @@ postgres=# SELECT application_name, userid AS user_name, datname AS database_nam psql | vagrant | postgres | SELECT application_name, userid AS user_name, dat | 8 | 127.0.0.1 psql | vagrant | postgres | SELECT bucket, substr(query,$1, $2) AS query, cmd | 1 | 127.0.0.1 (5 rows) +``` + +To learn more about `pg_stat_monitor` features and usage, see [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). To view all other data elements provided by `pg_stat_monitor`, please see the reference. 
+ + +### Building from source + +You can download the source code of the latest release of `pg_stat_monitor` from [the releases page on GitHub](https://github.com/Percona/pg_stat_monitor/releases) or using git: ``` - -```sql --- Select queries along with elevel, message and sqlcode which have some errors. - -SELECT decode_error_level(elevel) AS elevel, sqlcode, query, message FROM pg_stat_monitor WHERE elevel != 0; - elevel. | sqlcode | query | message ---------------------+---------+-------------------------------------------------------------------------------------------+------------------------------------------------ - ERROR | 132 | select count(*) from pgbench_branches | permission denied for table pgbench_branches - ERROR | 130 | select 1/0; | division by zero - ERROR | 132 | SELECT decode_elevel(elevel), sqlcode, message from pg_stat_monitor where elevel != 0; | function decode_elevel(integer) does not exist - ERROR | 132 | drop table if exists pgbench_accounts, pgbench_branches, pgbench_history, pgbench_tellers | must be owner of table pgbench_accounts -(4 rows) - +git clone git://github.com/Percona/pg_stat_monitor.git ``` -To learn more about ``pg_stat_monitor`` configuration and usage, see the [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). +Compile and install the extension -## Submitting Feature Requests or Bug Reports +``` +cd pg_stat_monitor +make USE_PGXS=1 +make USE_PGXS=1 install +``` -If you would like to request a feature of if you found a bug in ``pg_stat_monitor``, please submit the report to the [Jira issue tracker](https://jira.percona.com/projects/PG/issues). +### How to contribute -Start by searching the open tickets for a similar report. If you find that someone else has already reported your issue, then you can upvote that report to increase its visibility. 
+We welcome and strongly encourage community participation and contributions, and are always looking for new members that are as dedicated to serving the community as we are. -If there is no existing report, submit your report following these steps: - -1. Sign in to [Jira issue tracker](https://jira.percona.com/projects/PG/issues). You will need to create an account if you do not have one. - -2. In the *Summary*, *Description*, *Steps To Reproduce*, *Affects Version* fields describe the problem you have detected. - -3. As a general rule of thumb, try to create bug reports that are: - -- Reproducible: describe the steps to reproduce the problem. - -- Unique: check if there already exists a JIRA ticket to describe the problem. - -- Scoped to a Single Bug: only report one bug in one JIRA ticket. +The [Contributing Guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) contains the guidelines on how you can contribute. -## Copyright Notice +### Support, discussions and forums -Portions Copyright © 2018-2021, Percona LLC and/or its affiliates +We welcome your feedback on your experience with `pg_stat_monitor`. Join our [technical forum](https://forums.percona.com/) or [Discord](https://discord.gg/mQEyGPkNbR) channel for help with `pg_stat_monitor` and Percona's open source software for MySQL®, [PostgreSQL](https://www.percona.com/software/postgresql-distribution), and MongoDB® databases. -Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group -Portions Copyright (c) 1994, The Regents of the University of California +### License + +This project is licensed under the same open liberal terms and conditions as the PostgreSQL project itself. Please refer to the [LICENSE](https://github.com/percona/pg_stat_monitor/blob/master/LICENSE) file for more details. 
+ + +### Copyright notice + +* Portions Copyright © 2018-2021, Percona LLC and/or its affiliates +* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group +* Portions Copyright (c) 1994, The Regents of the University of California From 361c7d370a458c86730964a0892ef73a1065f487 Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Thu, 30 Sep 2021 18:47:12 +0000 Subject: [PATCH 28/45] PG-211: Fix the issue while generating histogram. --- pg_stat_monitor--1.0.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pg_stat_monitor--1.0.sql b/pg_stat_monitor--1.0.sql index f6106a2..65369f5 100644 --- a/pg_stat_monitor--1.0.sql +++ b/pg_stat_monitor--1.0.sql @@ -148,7 +148,7 @@ CREATE VIEW pg_stat_monitor AS SELECT comments, planid, query_plan, - (SELECT query from pg_stat_monitor_internal(true) s where s.queryid = p.top_queryid) AS top_query, + (SELECT query from pg_stat_monitor_internal(true) s where s.queryid = p.top_queryid and s.bucket = p.bucket) AS top_query, application_name, string_to_array(relations, ',') AS relations, cmd_type, From 89743e924315ecd08869ca49b5808c032ceecd8f Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Thu, 30 Sep 2021 17:13:27 -0300 Subject: [PATCH 29/45] PG-244: Fix race condition in get_next_wbucket(). The if condition below in get_next_wbucket() was subject to a race condition: if ((current_usec - pgss->prev_bucket_usec) > (PGSM_BUCKET_TIME * 1000 * 1000)) Two or more threads/processes could easily evaluate this condition to true, thus executing more than once the block that would calculate a new bucket id, clear/move old entries in the pgss_query_hash and pgss_hash hash tables. To avoid this problem, we define prev_bucket_usec and current_wbucket variables as atomic and execute a loop to check if another thread has updated prev_bucket_usec before the current one.
--- hash_query.c | 20 ++----------- pg_stat_monitor.c | 76 ++++++++++++++++++++++++++++++++++------------- pg_stat_monitor.h | 24 +++++++-------- 3 files changed, 70 insertions(+), 50 deletions(-) diff --git a/hash_query.c b/hash_query.c index 6c01a52..df39b9b 100644 --- a/hash_query.c +++ b/hash_query.c @@ -146,7 +146,7 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER_NULL, &found); if (!found) { - pgss->bucket_entry[pgss->current_wbucket]++; + pgss->bucket_entry[pg_atomic_read_u64(&pgss->current_wbucket)]++; /* New entry, initialize it */ /* reset the statistics */ memset(&entry->counters, 0, sizeof(Counters)); @@ -236,22 +236,6 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id) pgssEntry *entry = NULL; List *pending_entries = NIL; ListCell *pending_entry; - - /* - * During transition to a new bucket id, a rare but possible race - * condition may happen while reading pgss->current_wbucket. If a - * different thread/process updates pgss->current_wbucket before this - * function is called, it may happen that old_bucket_id == new_bucket_id. - * If that is the case, we adjust the old bucket id here instead of using - * a lock in order to avoid the overhead. 
- */ - if (old_bucket_id != -1 && old_bucket_id == new_bucket_id) - { - if (old_bucket_id == 0) - old_bucket_id = PGSM_MAX_BUCKETS - 1; - else - old_bucket_id--; - } hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) @@ -344,7 +328,7 @@ hash_entry_reset() { hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } - pgss->current_wbucket = 0; + pg_atomic_write_u64(&pgss->current_wbucket, 0); LWLockRelease(pgss->lock); } diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index b319b79..52b9760 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1490,6 +1490,7 @@ pgss_store(uint64 queryid, int application_name_len = pg_get_application_name(application_name); bool reset = false; uint64 bucketid; + uint64 prev_bucket_id; uint64 userid; int con; uint64 dbid = MyDatabaseId; @@ -1511,15 +1512,14 @@ pgss_store(uint64 queryid, extract_query_comments(query, comments, sizeof(comments)); /* Safety check... */ - if (!IsSystemInitialized() || !pgss_qbuf[pgss->current_wbucket]) + if (!IsSystemInitialized() || !pgss_qbuf[pg_atomic_read_u64(&pgss->current_wbucket)]) return; + prev_bucket_id = pg_atomic_read_u64(&pgss->current_wbucket); bucketid = get_next_wbucket(pgss); - if (bucketid != pgss->current_wbucket) - { + + if (bucketid != prev_bucket_id) reset = true; - pgss->current_wbucket = bucketid; - } LWLockAcquire(pgss->lock, LW_EXCLUSIVE); @@ -1990,40 +1990,76 @@ static uint64 get_next_wbucket(pgssSharedState *pgss) { struct timeval tv; - uint64 current_usec; - uint64 bucket_id; - struct tm *lt; + uint64 current_usec; + uint64 current_bucket_usec; + uint64 new_bucket_id; + uint64 prev_bucket_id; + struct tm *lt; + bool update_bucket = false; gettimeofday(&tv,NULL); current_usec = (TimestampTz) tv.tv_sec - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); current_usec = (current_usec * USECS_PER_SEC) + tv.tv_usec; + current_bucket_usec = pg_atomic_read_u64(&pgss->prev_bucket_usec); - if ((current_usec - 
pgss->prev_bucket_usec) > (PGSM_BUCKET_TIME * 1000 * 1000)) + /* + * If current bucket expired we loop attempting to update prev_bucket_usec. + * + * pg_atomic_compare_exchange_u64 may fail in two possible ways: + * 1. Another thread/process updated the variable before us. + * 2. A spurious failure / hardware event. + * + * In both failure cases we read prev_bucket_usec from memory again, if it was + * a spurious failure then the value of prev_bucket_usec must be the same as + * before, which will cause the while loop to execute again. + * + * If another thread updated prev_bucket_usec, then its current value will + * definitely make the while condition to fail, we can stop the loop as another + * thread has already updated prev_bucket_usec. + */ + while ((current_usec - current_bucket_usec) > (PGSM_BUCKET_TIME * 1000 * 1000)) + { + if (pg_atomic_compare_exchange_u64(&pgss->prev_bucket_usec, ¤t_bucket_usec, current_usec)) + { + update_bucket = true; + break; + } + + current_bucket_usec = pg_atomic_read_u64(&pgss->prev_bucket_usec); + } + + if (update_bucket) { unsigned char *buf; char file_name[1024]; int sec = 0; - bucket_id = (tv.tv_sec / PGSM_BUCKET_TIME) % PGSM_MAX_BUCKETS; - LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - buf = pgss_qbuf[bucket_id]; - hash_entry_dealloc(bucket_id, pgss->current_wbucket); - hash_query_entry_dealloc(bucket_id, buf); + new_bucket_id = (tv.tv_sec / PGSM_BUCKET_TIME) % PGSM_MAX_BUCKETS; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)bucket_id); + /* Update bucket id and retrieve the previous one. 
*/ + prev_bucket_id = pg_atomic_exchange_u64(&pgss->current_wbucket, new_bucket_id); + + LWLockAcquire(pgss->lock, LW_EXCLUSIVE); + buf = pgss_qbuf[new_bucket_id]; + hash_entry_dealloc(new_bucket_id, prev_bucket_id); + hash_query_entry_dealloc(new_bucket_id, buf); + + snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)new_bucket_id); unlink(file_name); LWLockRelease(pgss->lock); - pgss->prev_bucket_usec = current_usec; + lt = localtime(&tv.tv_sec); sec = lt->tm_sec - (lt->tm_sec % PGSM_BUCKET_TIME); if (sec < 0) sec = 0; - snprintf(pgss->bucket_start_time[bucket_id], sizeof(pgss->bucket_start_time[bucket_id]), + snprintf(pgss->bucket_start_time[new_bucket_id], sizeof(pgss->bucket_start_time[new_bucket_id]), "%04d-%02d-%02d %02d:%02d:%02d", lt->tm_year + 1900, lt->tm_mon + 1, lt->tm_mday, lt->tm_hour, lt->tm_min, sec); - return bucket_id; + + return new_bucket_id; } - return pgss->current_wbucket; + + return pg_atomic_read_u64(&pgss->current_wbucket); } #if PG_VERSION_NUM < 140000 @@ -3024,7 +3060,7 @@ pgss_store_query_info(uint64 bucketid, pgssStoreKind kind) { pgssSharedState *pgss = pgsm_get_ss(); - unsigned char *buf = pgss_qbuf[pgss->current_wbucket]; + unsigned char *buf = pgss_qbuf[pg_atomic_read_u64(&pgss->current_wbucket)]; pgssQueryEntry *entry; if (query_len > PGSM_QUERY_MAX_LEN) diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index e9c4f62..85d5261 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -301,16 +301,16 @@ typedef struct pgssEntry */ typedef struct pgssSharedState { - LWLock *lock; /* protects hashtable search/modification */ - double cur_median_usage; /* current median usage in hashtable */ - slock_t mutex; /* protects following fields only: */ - Size extent; /* current extent of query file */ - int64 n_writers; /* number of active writers to query file */ - uint64 current_wbucket; - uint64 prev_bucket_usec; - uint64 bucket_entry[MAX_BUCKETS]; - int64 query_buf_size_bucket; - char bucket_start_time[MAX_BUCKETS][60]; /* start 
time of the bucket */ + LWLock *lock; /* protects hashtable search/modification */ + double cur_median_usage; /* current median usage in hashtable */ + slock_t mutex; /* protects following fields only: */ + Size extent; /* current extent of query file */ + int64 n_writers; /* number of active writers to query file */ + pg_atomic_uint64 current_wbucket; + pg_atomic_uint64 prev_bucket_usec; + uint64 bucket_entry[MAX_BUCKETS]; + int64 query_buf_size_bucket; + char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ } pgssSharedState; #define ResetSharedState(x) \ @@ -318,8 +318,8 @@ do { \ x->cur_median_usage = ASSUMED_MEDIAN_INIT; \ x->cur_median_usage = ASSUMED_MEDIAN_INIT; \ x->n_writers = 0; \ - x->current_wbucket = 0; \ - x->prev_bucket_usec = 0; \ + pg_atomic_init_u64(&x->current_wbucket, 0); \ + pg_atomic_init_u64(&x->prev_bucket_usec, 0); \ memset(&x->bucket_entry, 0, MAX_BUCKETS * sizeof(uint64)); \ } while(0) From a959acb3d5a7e490015382b86e4be70e3a32e1c2 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Fri, 1 Oct 2021 15:27:45 -0300 Subject: [PATCH 30/45] PG-244: Move pending queries' text to new bucket after bucket expiration. Added code to move all pending queries text from an expired bucket's query buffer to the next, active query buffer. The previous implementation was not very efficient, it worked like this, as soon as a query is processed and a bucket expires: 1. Allocate memory to save the contents of the next query buffer. 2. Clear the next query buffer. 3. Iterate over pgss_query_hash, then, for each entry: - If the entry's bucket id is equal to the next bucket then: -- If the query for this entry has finished or ended in error, then remove it from the hash table. -- Else, if the query is not yet finished, copy the query from the backup query buffer to the new query buffer. 
Now, this copy was really expensive, because it was implemented using read_query() / SaveQueryText(), and read_query() scans the whole query buffer looking for some query ID; since we do this extra lookup loop for each pending query we end up with an O(n^2) algorithm. 4. Release the backup query buffer. Since now we always move pending queries from an expired bucket to the next one, there is no need to scan the next query buffer for pending queries (the pending queries are always in the current bucket, and when it expires we move them to the next one). Taking that into consideration, the new implementation works as follows, whenever a bucket expires: 1. Clear the next query buffer (all entries). 2. Define an array to store pending query ids from the expired bucket; we use this array later on in the algorithm. 3. Iterate over pgss_query_hash, then, for each entry: - If the entry's bucket id is equal to the next bucket then: -- If the query for this entry has finished or ended in error, then remove it from the hash table. This is equal to the previous implementation. - Else, if the entry's bucket id is equal to the just expired bucket id (old bucket id) and the query state is pending (not yet finished), then add this query ID to the array of pending query IDs. Note: We define the array to hold up to 128 pending entries; if there are more entries than this we try to allocate memory in the heap to store them, then, if the allocation fails we manually copy every pending query past the 128th to the next query buffer, using the previous algorithm (read_query() / SaveQueryText); this should be a very rare situation. 4. Finally, if there are pending queries, copy them from the previous query buffer to the next one using copy_queries. Now, copy_queries() is better than looping through each query entry and calling read_query() / SaveQueryText() to copy each of them to the new buffer, as explained, read_query() scans the whole query buffer for every call.
copy_queries(), instead, scans the query buffer only once, and for every element it checks if the current query id is in the list of queries to be copied, this is an array of uint64 that is small enough to fit in L1 cache. Another important fix in this commit is the addition of the line 1548 in pg_stat_monitor.c / pgss_store(): query_entry->state = kind; Before the addition of this line, all entries in the pgss_query_hash hash table were not having their status updated when the query entered execution / finished or ended in error, effectively leaving all entries as pending, thus whenever a bucket expired all entries were being copied from the expired bucket to the next one. --- hash_query.c | 152 ++++++++++++++++++++++++++++++++++++++++------ pg_stat_monitor.c | 5 +- pg_stat_monitor.h | 2 +- 3 files changed, 138 insertions(+), 21 deletions(-) diff --git a/hash_query.c b/hash_query.c index df39b9b..078fa47 100644 --- a/hash_query.c +++ b/hash_query.c @@ -25,6 +25,15 @@ static HTAB *pgss_hash; static HTAB *pgss_query_hash; static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size); +/* + * Copy all queries from query_buffer[old_bucket_id] to query_buffer[new_bucket_id] + * whose query ids are found in the array 'query_ids', of length 'n_queries'. + */ +static void copy_queries(unsigned char *query_buffer[], + uint64 new_bucket_id, + uint64 old_bucket_id, + uint64 *query_ids, + size_t n_queries); static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size) @@ -178,47 +187,107 @@ hash_query_entryies_reset() /* - * Deallocate finished entries. + * Deallocate finished entries in new_bucket_id. + * + * Move all pending queries in query_buffer[old_bucket_id] to + * query_buffer[new_bucket_id]. * * Caller must hold an exclusive lock on pgss->lock. 
*/ void -hash_query_entry_dealloc(int bucket, unsigned char *buf) +hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]) { HASH_SEQ_STATUS hash_seq; pgssQueryEntry *entry; - unsigned char *old_buf; pgssSharedState *pgss = pgsm_get_ss(); + /* + * Store pending query ids from the previous bucket. + * If there are more pending queries than MAX_PENDING_QUERIES then + * we try to dynamically allocate memory for them. + */ +#define MAX_PENDING_QUERIES 128 + uint64 pending_query_ids[MAX_PENDING_QUERIES]; + uint64 *pending_query_ids_buf = NULL; + size_t n_pending_queries = 0; + bool out_of_memory = false; - old_buf = palloc0(pgss->query_buf_size_bucket); - memcpy(old_buf, buf, pgss->query_buf_size_bucket); - - memset(buf, 0, pgss->query_buf_size_bucket); + /* Clear all queries in the query buffer for the new bucket. */ + memset(query_buffer[new_bucket_id], 0, pgss->query_buf_size_bucket); hash_seq_init(&hash_seq, pgss_query_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { - if (entry->key.bucket_id == bucket) + /* Remove previous finished query entries matching new bucket id. */ + if (entry->key.bucket_id == new_bucket_id) { if (entry->state == PGSS_FINISHED || entry->state == PGSS_ERROR) { entry = hash_search(pgss_query_hash, &entry->key, HASH_REMOVE, NULL); } + } + /* Set up a list of pending query ids from the previous bucket. */ + else if (entry->key.bucket_id == old_bucket_id && + (entry->state == PGSS_PARSE || + entry->state == PGSS_PLAN || + entry->state == PGSS_EXEC)) + { + if (n_pending_queries < MAX_PENDING_QUERIES) + { + pending_query_ids[n_pending_queries] = entry->key.queryid; + ++n_pending_queries; + } else { - int len; - char query_txt[1024]; - if (read_query(old_buf, entry->key.bucket_id, entry->key.queryid, query_txt) == 0) + /* + * No. of pending queries exceeds MAX_PENDING_QUERIES. + * Try to allocate memory from heap to keep track of pending query ids. 
+ * If allocation fails we manually copy pending query to the next query buffer. + */ + if (!out_of_memory && !pending_query_ids_buf) { - len = read_query_buffer(entry->key.bucket_id, entry->key.queryid, query_txt); - if (len != MAX_QUERY_BUFFER_BUCKET) - snprintf(query_txt, 32, "%s", ""); + /* Allocate enough room for query ids. */ + pending_query_ids_buf = malloc(sizeof(uint64) * hash_get_num_entries(pgss_query_hash)); + if (pending_query_ids_buf != NULL) + memcpy(pending_query_ids_buf, pending_query_ids, n_pending_queries * sizeof(uint64)); + else + out_of_memory = true; + } + + if (!out_of_memory) + { + /* Store pending query id in the dynamic buffer. */ + pending_query_ids_buf[n_pending_queries] = entry->key.queryid; + ++n_pending_queries; + } + else + { + /* No memory, manually copy query from previous buffer. */ + char query_txt[1024]; + + if (read_query(query_buffer[old_bucket_id], old_bucket_id, entry->key.queryid, query_txt) != 0 + || read_query_buffer(old_bucket_id, entry->key.queryid, query_txt) == MAX_QUERY_BUFFER_BUCKET) + { + SaveQueryText(new_bucket_id, entry->key.queryid, query_buffer[new_bucket_id], query_txt, strlen(query_txt)); + } + else + /* There was no space available to store the pending query text. */ + elog(WARNING, "hash_query_entry_dealloc: Failed to move pending query %lX, %s", + entry->key.queryid, + (PGSM_OVERFLOW_TARGET == OVERFLOW_TARGET_NONE) ? + "insufficient shared space for query" : + "I/O error reading query from disk"); } - SaveQueryText(entry->key.bucket_id, entry->key.queryid, buf, query_txt, strlen(query_txt)); } } } - pfree(old_buf); + + /* Copy all detected pending queries from previous bucket id to the new one. 
*/ + if (n_pending_queries > 0) { + if (n_pending_queries < MAX_PENDING_QUERIES) + pending_query_ids_buf = pending_query_ids; + + copy_queries(query_buffer, new_bucket_id, old_bucket_id, pending_query_ids_buf, n_pending_queries); + } } /* @@ -274,7 +343,7 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id) /* Update key to use the new bucket id. */ bkp_entry->key.bucket_id = new_bucket_id; - /* Add the entry to a list of noded to be processed later. */ + /* Add the entry to a list of nodes to be processed later. */ pending_entries = lappend(pending_entries, bkp_entry); /* Finally remove the pending query from the expired bucket id. */ @@ -378,3 +447,52 @@ IsHashInitialize(void) pgss_hash != NULL); } +static void copy_queries(unsigned char *query_buffer[], + uint64 new_bucket_id, + uint64 old_bucket_id, + uint64 *query_ids, + size_t n_queries) +{ + bool found; + uint64 query_id = 0; + uint64 query_len = 0; + uint64 rlen = 0; + uint64 buf_len = 0; + unsigned char *src_buffer = query_buffer[old_bucket_id]; + size_t i; + + memcpy(&buf_len, src_buffer, sizeof (uint64)); + if (buf_len <= 0) + return; + + rlen = sizeof (uint64); /* Move forwad to skip length bytes */ + while (rlen < buf_len) + { + found = false; + memcpy(&query_id, &src_buffer[rlen], sizeof (uint64)); /* query id */ + for (i = 0; i < n_queries; ++i) + { + if (query_id == query_ids[i]) + { + found = true; + break; + } + } + + rlen += sizeof (uint64); + if (buf_len <= rlen) + break; + + memcpy(&query_len, &src_buffer[rlen], sizeof (uint64)); /* query len */ + rlen += sizeof (uint64); + if (buf_len < rlen + query_len) + break; + + if (found) { + SaveQueryText(new_bucket_id, query_id, query_buffer[new_bucket_id], + (const char *)&src_buffer[rlen], query_len); + } + + rlen += query_len; + } +} \ No newline at end of file diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 52b9760..f3c8eff 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1545,6 +1545,7 @@ pgss_store(uint64 queryid, 
out_of_memory = true; break; } + query_entry->state = kind; entry = pgss_get_entry(bucketid, userid, dbid, queryid, ip, planid, appid); if (entry == NULL) { @@ -2030,7 +2031,6 @@ get_next_wbucket(pgssSharedState *pgss) if (update_bucket) { - unsigned char *buf; char file_name[1024]; int sec = 0; @@ -2040,9 +2040,8 @@ get_next_wbucket(pgssSharedState *pgss) prev_bucket_id = pg_atomic_exchange_u64(&pgss->current_wbucket, new_bucket_id); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - buf = pgss_qbuf[new_bucket_id]; hash_entry_dealloc(new_bucket_id, prev_bucket_id); - hash_query_entry_dealloc(new_bucket_id, buf); + hash_query_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf); snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)new_bucket_id); unlink(file_name); diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 85d5261..94a0f83 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -379,7 +379,7 @@ HTAB *pgsm_get_plan_hash(void); void hash_entry_reset(void); void hash_query_entryies_reset(void); void hash_query_entries(); -void hash_query_entry_dealloc(int bucket, unsigned char *buf); +void hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); bool hash_entry_dealloc(int new_bucket_id, int old_bucket_id); pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding); Size hash_memsize(void); From a93ba37ac300f36d78f23570a18996317a5a4636 Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Tue, 5 Oct 2021 20:33:34 +0000 Subject: [PATCH 31/45] PG-246, PG-211, PG-147: Fix performance issue while querying the pg_stat_monitor. This performance fix resolves two more issues (PG-211, PG-147). 
--- pg_stat_monitor--1.0.sql | 25 ++++++++++---------- pg_stat_monitor.c | 50 ++++++++++++++++++++++++++-------------- 2 files changed, 46 insertions(+), 29 deletions(-) diff --git a/pg_stat_monitor--1.0.sql b/pg_stat_monitor--1.0.sql index 65369f5..67cb3c2 100644 --- a/pg_stat_monitor--1.0.sql +++ b/pg_stat_monitor--1.0.sql @@ -36,6 +36,7 @@ CREATE FUNCTION pg_stat_monitor_internal(IN showtext boolean, OUT query_plan text, OUT state_code int8, OUT top_queryid text, + OUT top_query text, OUT application_name text, OUT relations text, -- 11 @@ -148,7 +149,7 @@ CREATE VIEW pg_stat_monitor AS SELECT comments, planid, query_plan, - (SELECT query from pg_stat_monitor_internal(true) s where s.queryid = p.top_queryid and s.bucket = p.bucket) AS top_query, + top_query, application_name, string_to_array(relations, ',') AS relations, cmd_type, @@ -157,17 +158,17 @@ CREATE VIEW pg_stat_monitor AS SELECT sqlcode, message, calls, - round( CAST(total_time as numeric), 4)::float8 as total_time, - round( CAST(min_time as numeric), 4)::float8 as min_time, - round( CAST(max_time as numeric), 4)::float8 as max_time, - round( CAST(mean_time as numeric), 4)::float8 as mean_time, - round( CAST(stddev_time as numeric), 4)::float8 as stddev_time, + total_time, + min_time, + max_time, + mean_time, + stddev_time, rows_retrieved, plans_calls, - round( CAST(plan_total_time as numeric), 4)::float8 as plan_total_time, - round( CAST(plan_min_time as numeric), 4)::float8 as plan_min_time, - round( CAST(plan_max_time as numeric), 4)::float8 as plan_max_time, - round( CAST(plan_mean_time as numeric), 4)::float8 as plan_mean_time, + plan_total_time, + plan_min_time, + plan_max_time, + plan_mean_time, shared_blks_hit, shared_blks_read, @@ -182,8 +183,8 @@ CREATE VIEW pg_stat_monitor AS SELECT blk_read_time, blk_write_time, (string_to_array(resp_calls, ',')) resp_calls, - round(cpu_user_time::numeric, 4) as cpu_user_time, - round(cpu_sys_time::numeric, 4) as cpu_sys_time, + cpu_user_time, + 
cpu_sys_time, wal_records, wal_fpi, wal_bytes, diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index f3c8eff..d6b6ddc 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -25,9 +25,11 @@ PG_MODULE_MAGIC; #define BUILD_VERSION "0.9.2-beta1" -#define PG_STAT_STATEMENTS_COLS 52 /* maximum of above */ +#define PG_STAT_STATEMENTS_COLS 53 /* maximum of above */ #define PGSM_TEXT_FILE "/tmp/pg_stat_monitor_query" +#define roundf(x,d) ((floor(((x)*pow(10,d))+.5))/pow(10,d)) + #define PGUNSIXBIT(val) (((val) & 0x3F) + '0') #define _snprintf(_str_dst, _str_src, _len, _max_len)\ @@ -1648,7 +1650,8 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, char parentid_txt[32]; pgssSharedState *pgss = pgsm_get_ss(); HTAB *pgss_hash = pgsm_get_hash(); - char *query_txt = (char*) malloc(PGSM_QUERY_MAX_LEN); + char *query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); + char *parent_query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); /* Safety check... */ if (!IsSystemInitialized()) @@ -1675,7 +1678,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) elog(ERROR, "pg_stat_monitor: return type must be a row type"); - if (tupdesc->natts != 49) + if (tupdesc->natts != 50) elog(ERROR, "pg_stat_monitor: incorrect number of output arguments, required %d", tupdesc->natts); tupstore = tuplestore_begin_heap(true, false, work_mem); @@ -1714,6 +1717,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (query_entry == NULL) continue; + if (read_query(buf, bucketid, queryid, query_txt) == 0) { int len; @@ -1734,6 +1738,16 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (tmp.state == PGSS_FINISHED) continue; } + if (tmp.info.parentid != UINT64CONST(0)) + { + int len = 0; + if (read_query(buf, bucketid, tmp.info.parentid, parent_query_txt) == 0) + { + len = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt); + if (len != MAX_QUERY_BUFFER_BUCKET) + snprintf(parent_query_txt, 32, "%s", ""); + } + } /* 
bucketid at column number 0 */ values[i++] = Int64GetDatumFast(bucketid); @@ -1808,10 +1822,12 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, { snprintf(parentid_txt, 32, "%08lX",tmp.info.parentid); values[i++] = CStringGetTextDatum(parentid_txt); + values[i++] = CStringGetTextDatum(parent_query_txt); } else { nulls[i++] = true; + nulls[i++] = true; } /* application_name at column number 9 */ @@ -1880,23 +1896,23 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = Int64GetDatumFast(tmp.calls.calls); /* total_time at column number 17 */ - values[i++] = Float8GetDatumFast(tmp.time.total_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.total_time, 4)); /* min_time at column number 18 */ - values[i++] = Float8GetDatumFast(tmp.time.min_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.min_time,4)); /* max_time at column number 19 */ - values[i++] = Float8GetDatumFast(tmp.time.max_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.max_time,4)); /* mean_time at column number 20 */ - values[i++] = Float8GetDatumFast(tmp.time.mean_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.mean_time,4)); if (tmp.calls.calls > 1) stddev = sqrt(tmp.time.sum_var_time / tmp.calls.calls); else stddev = 0.0; /* calls at column number 21 */ - values[i++] = Float8GetDatumFast(stddev); + values[i++] = Float8GetDatumFast(roundf(stddev,4)); /* calls at column number 22 */ values[i++] = Int64GetDatumFast(tmp.calls.rows); @@ -1912,23 +1928,23 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = Int64GetDatumFast(tmp.plancalls.calls); /* total_time at column number 24 */ - values[i++] = Float8GetDatumFast(tmp.plantime.total_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.total_time,4)); /* min_time at column number 25 */ - values[i++] = Float8GetDatumFast(tmp.plantime.min_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.min_time,4)); /* max_time at column number 26 */ - values[i++] = 
Float8GetDatumFast(tmp.plantime.max_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.max_time,4)); /* mean_time at column number 27 */ - values[i++] = Float8GetDatumFast(tmp.plantime.mean_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.mean_time,4)); if (tmp.plancalls.calls > 1) stddev = sqrt(tmp.plantime.sum_var_time / tmp.plancalls.calls); else stddev = 0.0; /* calls at column number 28 */ - values[i++] = Float8GetDatumFast(stddev); + values[i++] = Float8GetDatumFast(roundf(stddev,4)); /* blocks are from column number 29 - 40 */ values[i++] = Int64GetDatumFast(tmp.blocks.shared_blks_hit); @@ -1948,10 +1964,10 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = IntArrayGetTextDatum(tmp.resp_calls, MAX_RESPONSE_BUCKET); /* utime at column number 42 */ - values[i++] = Float8GetDatumFast(tmp.sysinfo.utime); + values[i++] = Float8GetDatumFast(roundf(tmp.sysinfo.utime,4)); /* stime at column number 43 */ - values[i++] = Float8GetDatumFast(tmp.sysinfo.stime); + values[i++] = Float8GetDatumFast(roundf(tmp.sysinfo.stime,4)); { char buf[256]; Datum wal_bytes; @@ -1980,7 +1996,8 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, } tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - free(query_txt); + pfree(query_txt); + pfree(parent_query_txt); /* clean up and return the tuplestore */ LWLockRelease(pgss->lock); @@ -3018,7 +3035,6 @@ read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char * query) memcpy(&query_id, &buf[rlen], sizeof (uint64)); /* query id */ if (query_id == queryid) found = true; - rlen += sizeof (uint64); if (buf_len <= rlen) continue; From fc9e63049740042f9a8473f3caf22caeb9c08aed Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Wed, 6 Oct 2021 14:44:57 -0300 Subject: [PATCH 32/45] PG-254: Postpone variable initialization. There were many variables being initialized in pgss_store() before checking if the module was actually active, this would waste cpu processor if the module is disabled. 
To fix that, declare variables and initialize them only after check that pg_stat_monitor is active. --- pg_stat_monitor.c | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index d6b6ddc..f739ff3 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1486,37 +1486,44 @@ pgss_store(uint64 queryid, JumbleState *jstate, pgssStoreKind kind) { - pgssEntry *entry; + pgssEntry *entry; pgssSharedState *pgss = pgsm_get_ss(); - char application_name[APPLICATIONNAME_LEN]; - int application_name_len = pg_get_application_name(application_name); - bool reset = false; - uint64 bucketid; - uint64 prev_bucket_id; + char application_name[APPLICATIONNAME_LEN]; + int application_name_len; + bool reset = false; + uint64 bucketid; + uint64 prev_bucket_id; uint64 userid; int con; - uint64 dbid = MyDatabaseId; - uint64 ip = pg_get_client_addr(); - uint64 planid = plan_info ? plan_info->planid: 0; - uint64 appid = djb2_hash((unsigned char *)application_name, application_name_len); + uint64 dbid; + uint64 ip; + uint64 planid; + uint64 appid; char comments[512] = ""; - bool out_of_memory = false; + bool out_of_memory = false; + /* Monitoring is disabled */ if (!PGSM_ENABLED) return; - Assert(query != NULL); - if (kind == PGSS_ERROR) - GetUserIdAndSecContext((unsigned int *)&userid, &con); - else - userid = GetUserId(); - - extract_query_comments(query, comments, sizeof(comments)); - /* Safety check... */ if (!IsSystemInitialized() || !pgss_qbuf[pg_atomic_read_u64(&pgss->current_wbucket)]) return; + Assert(query != NULL); + if (kind == PGSS_ERROR) + GetUserIdAndSecContext((unsigned int *)&userid, &con); + else + userid = GetUserId(); + + dbid = MyDatabaseId; + application_name_len = pg_get_application_name(application_name); + ip = pg_get_client_addr(); + planid = plan_info ? 
plan_info->planid: 0; + appid = djb2_hash((unsigned char *)application_name, application_name_len); + + extract_query_comments(query, comments, sizeof(comments)); + prev_bucket_id = pg_atomic_read_u64(&pgss->current_wbucket); bucketid = get_next_wbucket(pgss); @@ -1717,7 +1724,6 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (query_entry == NULL) continue; - if (read_query(buf, bucketid, queryid, query_txt) == 0) { int len; From fcb70ffed1dea0921f1eb64c123afe6a1b5d61db Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Wed, 6 Oct 2021 14:57:15 -0300 Subject: [PATCH 33/45] PG-254: Removal of pgss_query_hash hash table. There was no necessity for using a separate hash table to keep track of queries as all the necessary information is already available in the pgss_hash table. The code that moves pending queries' text in hash_query_entry_dealloc was merged into hash_entry_dealloc. Couple functions were not necessary anymore, thus were removed: - hash_create_query_entry - pgss_store_query_info - pgss_get_entry (this logic was added directly into pgss_store). We simplified the logic in pgss_store, it basically works as follows: 1. Create a key (bucketid, queryid, etc...) for the query event. 2. Lookup the key in pgss_hash, if no entry exists for the key, create a new one, save the query text into the current query buffer. 3. If jstate == NULL, then update stats counters for the entry. 
--- hash_query.c | 278 ++++++++++++++++++---------------------------- pg_stat_monitor.c | 216 ++++++++++++----------------------- pg_stat_monitor.h | 2 +- 3 files changed, 179 insertions(+), 317 deletions(-) diff --git a/hash_query.c b/hash_query.c index 078fa47..bb684d8 100644 --- a/hash_query.c +++ b/hash_query.c @@ -22,7 +22,6 @@ static pgssSharedState *pgss; static HTAB *pgss_hash; -static HTAB *pgss_query_hash; static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size); /* @@ -55,7 +54,6 @@ pgss_startup(void) pgss = NULL; pgss_hash = NULL; - pgss_query_hash = NULL; /* * Create or attach to the shared memory state, including hash table @@ -85,7 +83,6 @@ } pgss_hash = hash_init("pg_stat_monitor: bucket hashtable", sizeof(pgssHashKey), sizeof(pgssEntry), MAX_BUCKET_ENTRIES); - pgss_query_hash = hash_init("pg_stat_monitor: query hashtable", sizeof(pgssQueryHashKey), sizeof(pgssQueryEntry),MAX_BUCKET_ENTRIES); LWLockRelease(AddinShmemInitLock); @@ -169,146 +166,64 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) elog(DEBUG1, "%s", "pg_stat_monitor: out of memory"); return entry; } + /* - * Reset all the entries. + * Prepare resources for using the new bucket: + * - Deallocate finished hash table entries in new_bucket_id (entries whose + * state is PGSS_FINISHED or PGSS_ERROR). + * - Clear query buffer for new_bucket_id. + * - If old_bucket_id != -1, move all pending hash table entries in + * old_bucket_id to the new bucket id, also move pending queries from the + * previous query buffer (query_buffer[old_bucket_id]) to the new one + * (query_buffer[new_bucket_id]). * * Caller must hold an exclusive lock on pgss->lock. 
*/ void -hash_query_entryies_reset() -{ - HASH_SEQ_STATUS hash_seq; - pgssQueryEntry *entry; - - hash_seq_init(&hash_seq, pgss_query_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) - entry = hash_search(pgss_query_hash, &entry->key, HASH_REMOVE, NULL); -} - - -/* - * Deallocate finished entries in new_bucket_id. - * - * Move all pending queries in query_buffer[old_bucket_id] to - * query_buffer[new_bucket_id]. - * - * Caller must hold an exclusive lock on pgss->lock. - */ -void -hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]) -{ - HASH_SEQ_STATUS hash_seq; - pgssQueryEntry *entry; - pgssSharedState *pgss = pgsm_get_ss(); - /* - * Store pending query ids from the previous bucket. - * If there are more pending queries than MAX_PENDING_QUERIES then - * we try to dynamically allocate memory for them. - */ -#define MAX_PENDING_QUERIES 128 - uint64 pending_query_ids[MAX_PENDING_QUERIES]; - uint64 *pending_query_ids_buf = NULL; - size_t n_pending_queries = 0; - bool out_of_memory = false; - - /* Clear all queries in the query buffer for the new bucket. */ - memset(query_buffer[new_bucket_id], 0, pgss->query_buf_size_bucket); - - hash_seq_init(&hash_seq, pgss_query_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) - { - /* Remove previous finished query entries matching new bucket id. */ - if (entry->key.bucket_id == new_bucket_id) - { - if (entry->state == PGSS_FINISHED || entry->state == PGSS_ERROR) - { - entry = hash_search(pgss_query_hash, &entry->key, HASH_REMOVE, NULL); - } - } - /* Set up a list of pending query ids from the previous bucket. */ - else if (entry->key.bucket_id == old_bucket_id && - (entry->state == PGSS_PARSE || - entry->state == PGSS_PLAN || - entry->state == PGSS_EXEC)) - { - if (n_pending_queries < MAX_PENDING_QUERIES) - { - pending_query_ids[n_pending_queries] = entry->key.queryid; - ++n_pending_queries; - } - else - { - /* - * No. 
of pending queries exceeds MAX_PENDING_QUERIES. - * Try to allocate memory from heap to keep track of pending query ids. - * If allocation fails we manually copy pending query to the next query buffer. - */ - if (!out_of_memory && !pending_query_ids_buf) - { - /* Allocate enough room for query ids. */ - pending_query_ids_buf = malloc(sizeof(uint64) * hash_get_num_entries(pgss_query_hash)); - if (pending_query_ids_buf != NULL) - memcpy(pending_query_ids_buf, pending_query_ids, n_pending_queries * sizeof(uint64)); - else - out_of_memory = true; - } - - if (!out_of_memory) - { - /* Store pending query id in the dynamic buffer. */ - pending_query_ids_buf[n_pending_queries] = entry->key.queryid; - ++n_pending_queries; - } - else - { - /* No memory, manually copy query from previous buffer. */ - char query_txt[1024]; - - if (read_query(query_buffer[old_bucket_id], old_bucket_id, entry->key.queryid, query_txt) != 0 - || read_query_buffer(old_bucket_id, entry->key.queryid, query_txt) == MAX_QUERY_BUFFER_BUCKET) - { - SaveQueryText(new_bucket_id, entry->key.queryid, query_buffer[new_bucket_id], query_txt, strlen(query_txt)); - } - else - /* There was no space available to store the pending query text. */ - elog(WARNING, "hash_query_entry_dealloc: Failed to move pending query %lX, %s", - entry->key.queryid, - (PGSM_OVERFLOW_TARGET == OVERFLOW_TARGET_NONE) ? - "insufficient shared space for query" : - "I/O error reading query from disk"); - } - } - } - } - - /* Copy all detected pending queries from previous bucket id to the new one. */ - if (n_pending_queries > 0) { - if (n_pending_queries < MAX_PENDING_QUERIES) - pending_query_ids_buf = pending_query_ids; - - copy_queries(query_buffer, new_bucket_id, old_bucket_id, pending_query_ids_buf, n_pending_queries); - } -} - -/* - * Deallocate least-used entries. - * - * If old_bucket_id != -1, move all pending queries in old_bucket_id - * to the new bucket id. - * - * Caller must hold an exclusive lock on pgss->lock. 
- */ -bool -hash_entry_dealloc(int new_bucket_id, int old_bucket_id) +hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]) { HASH_SEQ_STATUS hash_seq; pgssEntry *entry = NULL; - List *pending_entries = NIL; - ListCell *pending_entry; + pgssSharedState *pgss = pgsm_get_ss(); +#define MAX_PENDING_QUERIES 128 + /* + * Variables used to store pending queries from the previous bucket. + * + * We use a linked list to keep a full copy of entries from the hash table + * that must be moved to the new bucket. + * + * We use an array to keep a list of pending query IDs only, the array will + * be used in copy_queries() as a filter of which queries to copy. + * The reason we use a separate array to keep pending queries IDs is that it + * is faster to iterate than the linked list, as following pointers in a list + * almost always make bad use of cpu cache, while a small array of uint64 is + * a good candidate to be stored in L1 cache. + * + * If there are more pending queries than MAX_PENDING_QUERIES then + * we try to dynamically allocate memory for them. + */ + List *pending_entries = NIL; + ListCell *pending_entry; + uint64 pending_query_ids[MAX_PENDING_QUERIES]; + uint64 *pending_query_ids_buf = NULL; + size_t n_pending_queries = 0; + bool out_of_memory = false; + + if (new_bucket_id != -1) + { + /* Clear all queries in the query buffer for the new bucket. */ + memset(query_buffer[new_bucket_id], 0, pgss->query_buf_size_bucket); + } + + /* Iterate over the hash table. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { + /* + * Remove all entries if new_bucket_id == -1. + * Otherwise remove entry in new_bucket_id if it's finished already. 
+ */ if (new_bucket_id < 0 || (entry->key.bucket_id == new_bucket_id && (entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR))) @@ -333,6 +248,7 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id) if (!bkp_entry) { /* No memory, remove pending query entry from the previous bucket. */ + elog(ERROR, "hash_entry_dealloc: out of memory"); entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); continue; } @@ -346,12 +262,63 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id) /* Add the entry to a list of nodes to be processed later. */ pending_entries = lappend(pending_entries, bkp_entry); + /* Add pending query ID to the array. */ + if (n_pending_queries < MAX_PENDING_QUERIES) + { + pending_query_ids[n_pending_queries] = entry->key.queryid; + ++n_pending_queries; + } + else + { + /* + * No. of pending queries exceeds MAX_PENDING_QUERIES. + * Try to dynamically allocate memory to keep track of pending query ids. + * If allocation fails we manually copy pending query to the next query buffer. + */ + if (!out_of_memory && !pending_query_ids_buf) + { + /* Allocate enough room for query ids. */ + pending_query_ids_buf = malloc(sizeof(uint64) * hash_get_num_entries(pgss_hash)); + if (pending_query_ids_buf != NULL) + memcpy(pending_query_ids_buf, pending_query_ids, n_pending_queries * sizeof(uint64)); + else + out_of_memory = true; + } + + if (!out_of_memory) + { + /* Store pending query id in the dynamic buffer. */ + pending_query_ids_buf[n_pending_queries] = entry->key.queryid; + ++n_pending_queries; + } + else + { + /* No memory, manually copy query from previous buffer. 
*/ + char query_txt[1024]; + + if (read_query(query_buffer[old_bucket_id], old_bucket_id, entry->key.queryid, query_txt) != 0 + || read_query_buffer(old_bucket_id, entry->key.queryid, query_txt) == MAX_QUERY_BUFFER_BUCKET) + { + SaveQueryText(new_bucket_id, entry->key.queryid, query_buffer[new_bucket_id], query_txt, strlen(query_txt)); + } + else + /* There was no space available to store the pending query text. */ + elog(ERROR, "hash_entry_dealloc: Failed to move pending query %lX, %s", + entry->key.queryid, + (PGSM_OVERFLOW_TARGET == OVERFLOW_TARGET_NONE) ? + "insufficient shared space for query" : + "I/O error reading query from disk"); + } + } + /* Finally remove the pending query from the expired bucket id. */ entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } } } + Assert(list_length(pending_entries) == n_pending_queries); + /* * Iterate over the list of pending queries in order * to add them back to the hash table with the updated bucket id. @@ -375,9 +342,15 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id) free(old_entry); } - list_free(pending_entries); + /* Copy all detected pending queries from previous bucket id to the new one. 
*/ + if (n_pending_queries > 0) { + if (n_pending_queries < MAX_PENDING_QUERIES) + pending_query_ids_buf = pending_query_ids; - return true; + copy_queries(query_buffer, new_bucket_id, old_bucket_id, pending_query_ids_buf, n_pending_queries); + } + + list_free(pending_entries); } /* @@ -401,45 +374,6 @@ hash_entry_reset() LWLockRelease(pgss->lock); } -/* Caller must acquire a lock */ -pgssQueryEntry* -hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid) -{ - pgssQueryHashKey key; - pgssQueryEntry *entry; - bool found; - - key.queryid = queryid; - key.bucket_id = bucket_id; - key.dbid = dbid; - key.userid = userid; - key.ip = ip; - key.appid = appid; - - entry = (pgssQueryEntry *) hash_search(pgss_query_hash, &key, HASH_ENTER_NULL, &found); - return entry; -} - -/* Caller must acquire a lock */ -pgssQueryEntry* -hash_find_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid) -{ - pgssQueryHashKey key; - pgssQueryEntry *entry; - bool found; - - key.queryid = queryid; - key.bucket_id = bucket_id; - key.dbid = dbid; - key.userid = userid; - key.ip = ip; - key.appid = appid; - - /* Lookup the hash table entry with shared lock. 
*/ - entry = (pgssQueryEntry *) hash_search(pgss_query_hash, &key, HASH_FIND, &found); - return entry; -} - bool IsHashInitialize(void) { diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index f739ff3..32334b8 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -147,15 +147,6 @@ static uint64 pgss_hash_string(const char *str, int len); char *unpack_sql_state(int sql_state); static void pgss_store_error(uint64 queryid, const char * query, ErrorData *edata); -static pgssQueryEntry *pgss_store_query_info(uint64 bucketid, - uint64 queryid, - uint64 dbid, - uint64 userid, - uint64 ip, - uint64 appid, - const char *query, - uint64 query_len, - pgssStoreKind kind); static void pgss_store_utility(const char *query, double total_time, @@ -1314,40 +1305,6 @@ pgss_update_entry(pgssEntry *entry, } } -static pgssEntry* -pgss_get_entry(uint64 bucket_id, - uint64 userid, - uint64 dbid, - uint64 queryid, - uint64 ip, - uint64 planid, - uint64 appid) -{ - pgssEntry *entry; - pgssHashKey key; - HTAB *pgss_hash = pgsm_get_hash(); - pgssSharedState *pgss = pgsm_get_ss(); - - key.bucket_id = bucket_id; - key.userid = userid; - key.dbid = MyDatabaseId; - key.queryid = queryid; - key.ip = pg_get_client_addr(); - key.planid = planid; - key.appid = appid; - - entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); - if(!entry) - { - /* OK to create a new hashtable entry */ - entry = hash_entry_alloc(pgss, &key, GetDatabaseEncoding()); - if (entry == NULL) - return NULL; - } - Assert(entry); - return entry; -} - static void pgss_store_query(uint64 queryid, const char * query, @@ -1486,6 +1443,8 @@ pgss_store(uint64 queryid, JumbleState *jstate, pgssStoreKind kind) { + HTAB *pgss_hash; + pgssHashKey key; pgssEntry *entry; pgssSharedState *pgss = pgsm_get_ss(); char application_name[APPLICATIONNAME_LEN]; @@ -1495,12 +1454,10 @@ pgss_store(uint64 queryid, uint64 prev_bucket_id; uint64 userid; int con; - uint64 dbid; - uint64 ip; uint64 planid; uint64 appid; char 
comments[512] = ""; - bool out_of_memory = false; + size_t query_len; /* Monitoring is disabled */ if (!PGSM_ENABLED) @@ -1516,9 +1473,7 @@ pgss_store(uint64 queryid, else userid = GetUserId(); - dbid = MyDatabaseId; application_name_len = pg_get_application_name(application_name); - ip = pg_get_client_addr(); planid = plan_info ? plan_info->planid: 0; appid = djb2_hash((unsigned char *)application_name, application_name_len); @@ -1530,63 +1485,74 @@ pgss_store(uint64 queryid, if (bucketid != prev_bucket_id) reset = true; - LWLockAcquire(pgss->lock, LW_EXCLUSIVE); + key.bucket_id = bucketid; + key.userid = userid; + key.dbid = MyDatabaseId; + key.queryid = queryid; + key.ip = pg_get_client_addr(); + key.planid = planid; + key.appid = appid; - switch (kind) + pgss_hash = pgsm_get_hash(); + + LWLockAcquire(pgss->lock, LW_SHARED); + + entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); + if (!entry) { - case PGSS_PARSE: - case PGSS_PLAN: - { - pgssQueryEntry *query_entry; - query_entry = pgss_store_query_info(bucketid, queryid, dbid, userid, ip, appid, query, strlen(query), kind); - if (query_entry == NULL) - out_of_memory = true; - break; - } - case PGSS_ERROR: - case PGSS_EXEC: - case PGSS_FINISHED: - { - pgssQueryEntry *query_entry; - query_entry = pgss_store_query_info(bucketid, queryid, dbid, userid, ip, appid, query, strlen(query), kind); - if (query_entry == NULL) - { - out_of_memory = true; - break; - } - query_entry->state = kind; - entry = pgss_get_entry(bucketid, userid, dbid, queryid, ip, planid, appid); - if (entry == NULL) - { - out_of_memory = true; - break; - } + uint64 prev_qbuf_len; - if (jstate == NULL) - pgss_update_entry(entry, /* entry */ - bucketid, /* bucketid */ - queryid, /* queryid */ - query, /* query */ - comments, /* comments */ - plan_info, /* PlanInfo */ - cmd_type, /* CmdType */ - sys_info, /* SysInfo */ - error_info, /* ErrorInfo */ - total_time, /* total_time */ - rows, /* rows */ - bufusage, /* bufusage */ - 
walusage, /* walusage */ - reset, /* reset */ - kind); /* kind */ + query_len = strlen(query); + if (query_len > PGSM_QUERY_MAX_LEN) + query_len = PGSM_QUERY_MAX_LEN; + + /* Need exclusive lock to make a new hashtable entry - promote */ + LWLockRelease(pgss->lock); + LWLockAcquire(pgss->lock, LW_EXCLUSIVE); + + /* + * Save current query buffer length; if we fail to add a + * new entry to the hash table then we must restore the + * original length. + */ + memcpy(&prev_qbuf_len, pgss_qbuf[bucketid], sizeof(prev_qbuf_len)); + if (!SaveQueryText(bucketid, queryid, pgss_qbuf[bucketid], query, query_len)) + { + LWLockRelease(pgss->lock); + elog(DEBUG1, "pg_stat_monitor: insufficient shared space for query."); + return; + } + + /* OK to create a new hashtable entry */ + entry = hash_entry_alloc(pgss, &key, GetDatabaseEncoding()); + if (entry == NULL) + { + /* Restore previous query buffer length. */ + memcpy(pgss_qbuf[bucketid], &prev_qbuf_len, sizeof(prev_qbuf_len)); + LWLockRelease(pgss->lock); + elog(DEBUG1, "pg_stat_monitor: out of memory"); + return; } - break; - case PGSS_NUMKIND: - case PGSS_INVALID: - break; } + + if (jstate == NULL) + pgss_update_entry(entry, /* entry */ + bucketid, /* bucketid */ + queryid, /* queryid */ + query, /* query */ + comments, /* comments */ + plan_info, /* PlanInfo */ + cmd_type, /* CmdType */ + sys_info, /* SysInfo */ + error_info, /* ErrorInfo */ + total_time, /* total_time */ + rows, /* rows */ + bufusage, /* bufusage */ + walusage, /* walusage */ + reset, /* reset */ + kind); /* kind */ + LWLockRelease(pgss->lock); - if (out_of_memory) - elog(DEBUG1, "pg_stat_monitor: out of memory"); } /* * Reset all statement statistics. 
@@ -1601,8 +1567,12 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS) (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - hash_entry_dealloc(-1, -1); - hash_query_entryies_reset(); + hash_entry_dealloc(-1, -1, NULL); + /* Reset query buffers. */ + for (size_t i = 0; i < MAX_BUCKETS; ++i) + { + *(uint64 *)pgss_qbuf[i] = 0; + } #ifdef BENCHMARK for (int i = STATS_START; i < STATS_END; ++i) { pg_hook_stats[i].min_time = 0; @@ -1653,7 +1623,6 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, MemoryContext oldcontext; HASH_SEQ_STATUS hash_seq; pgssEntry *entry; - pgssQueryEntry *query_entry; char parentid_txt[32]; pgssSharedState *pgss = pgsm_get_ss(); HTAB *pgss_hash = pgsm_get_hash(); @@ -1713,16 +1682,12 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, uint64 userid = entry->key.userid; uint64 ip = entry->key.ip; uint64 planid = entry->key.planid; - uint64 appid = entry->key.appid; unsigned char *buf = pgss_qbuf[bucketid]; #if PG_VERSION_NUM < 140000 bool is_allowed_role = is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS); #else bool is_allowed_role = is_member_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS); #endif - query_entry = hash_find_query_entry(bucketid, queryid, dbid, userid, ip, appid); - if (query_entry == NULL) - continue; if (read_query(buf, bucketid, queryid, query_txt) == 0) { @@ -2063,8 +2028,7 @@ get_next_wbucket(pgssSharedState *pgss) prev_bucket_id = pg_atomic_exchange_u64(&pgss->current_wbucket, new_bucket_id); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - hash_entry_dealloc(new_bucket_id, prev_bucket_id); - hash_query_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf); + hash_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf); snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)new_bucket_id); unlink(file_name); @@ -3069,42 +3033,6 @@ exit: return 0; } -static pgssQueryEntry* -pgss_store_query_info(uint64 
bucketid, - uint64 queryid, - uint64 dbid, - uint64 userid, - uint64 ip, - uint64 appid, - const char *query, - uint64 query_len, - pgssStoreKind kind) -{ - pgssSharedState *pgss = pgsm_get_ss(); - unsigned char *buf = pgss_qbuf[pg_atomic_read_u64(&pgss->current_wbucket)]; - pgssQueryEntry *entry; - - if (query_len > PGSM_QUERY_MAX_LEN) - query_len = PGSM_QUERY_MAX_LEN; - - /* Already have query in the shared buffer, there - * is no need to add that again. - */ - entry = hash_find_query_entry(bucketid, queryid, dbid, userid, ip, appid); - if (entry) - return entry; - - entry = hash_create_query_entry(bucketid, queryid, dbid, userid, ip, appid); - if (!entry) - return NULL; - entry->state = kind; - - if(!SaveQueryText(bucketid, queryid, buf, query, query_len)) - return NULL; - - return entry; -} - bool SaveQueryText(uint64 bucketid, uint64 queryid, unsigned char *buf, const char *query, uint64 query_len) { diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 94a0f83..2eab42b 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -380,7 +380,7 @@ void hash_entry_reset(void); void hash_query_entryies_reset(void); void hash_query_entries(); void hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); -bool hash_entry_dealloc(int new_bucket_id, int old_bucket_id); +void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding); Size hash_memsize(void); From f986ac009620cea0924cfcfa2ca9c8b191fd1548 Mon Sep 17 00:00:00 2001 From: Anastasia Alexadrova Date: Thu, 7 Oct 2021 15:11:25 +0300 Subject: [PATCH 34/45] PG-255 Doc - Fix README title and TOC modified: README.md --- README.md | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index cae7861..1dcee35 100644 --- a/README.md +++ b/README.md @@ -3,16 +3,16 @@ 
![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) ![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) -# pg_stat_monitor: Query Performance Tool for PostgreSQL +# pg_stat_monitor: Query Performance Monitoring Tool for PostgreSQL ## Table of Contents * [Overview](#overview) -* [pg_stat_monitor features](#pg_stat_monitor-features) -* [Documentation](#documentation) * [Supported versions](#supported-versions) +* [Features](#features) +* [Documentation](#documentation) * [Supported platforms](#supported-platforms) -* [Installation](#installation) +* [Installation guidelines](#installation-guidelines) * [Configuration](#configuration) * [Setup](#setup) * [Building from source code](#building-from-source) @@ -38,8 +38,17 @@ The RPM (for RHEL and CentOS) and the DEB (for Debian and Ubuntu) packages are a The RPM packages are also available in the official PostgreSQL (PGDG) yum repositories. +### Supported versions -### pg_stat_monitor features +The `pg_stat_monitor` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL, but is only tested with these versions: + +| **Distribution** | **Version** | **Provider** | +| ---------------- | --------------- | ------------ | +|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/) and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/)| Percona| +| PostgreSQL | 11, 12, and 13 | PostgreSQL Global Development Group (PGDG) | + + +### Features `pg_stat_monitor` simplifies query observability by providing a more holistic view of query from performance, application and analysis perspectives. 
This is achieved by grouping data in configurable time buckets that allow capturing of load and performance information for smaller time windows. So performance issues and patterns can be identified based on time and workload. @@ -61,16 +70,6 @@ The RPM packages are also available in the official PostgreSQL (PGDG) yum reposi 5. Contributing guide (https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) -### Supported versions - -The `pg_stat_monitor` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL, but is only tested with these versions: - -| **Distribution** | **Version** | **Provider** | -| ---------------- | --------------- | ------------ | -|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/) and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/)| Percona| -| PostgreSQL | 11, 12, and 13 | PostgreSQL Global Development Group (PGDG) | - - ### Supported platforms The PostgreSQL YUM repository supports `pg_stat_monitor` for all [supported versions](#supported-versions) for the following platforms: @@ -81,7 +80,7 @@ The PostgreSQL YUM repository supports `pg_stat_monitor` for all [supported vers Find the list of supported platforms for `pg_stat_monitor` within [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) on the [Percona Release Lifecycle Overview](https://www.percona.com/services/policies/percona-software-support-lifecycle#pgsql) page. 
-### Installation +### Installation Guidelines You can install `pg_stat_monitor` from the following sources: From 8fdf0946fefce265878b05a0c931f98cf2e10d94 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Thu, 7 Oct 2021 10:06:20 -0300 Subject: [PATCH 35/45] PG-254: Add query location to hash table entries. Whenever a new query is added to a query buffer, record the position in which the query was inserted, we can then use this information to locate the query in a faster way later on when required. This allowed to simplify the logic in hash_entry_dealloc(), after creating the list of pending queries, as the list is scanned we can copy the query from the previous query buffer to the new one by using the query position (query_pos), this avoids scanning the whole query buffer when looking up for the queryid. Also, when moving a query to a new buffer (copy_query), we update the query_pos for the hash table entry, so it points to the right place in the new query buffer. read_query() function was updated to allow query position to be passed as argument, if pos != 0 use it to locate the query directly, otherwise fallback to the previous mode of scanning the whole buffer. SaveQueryText() was updated to pass a reference to the query position as argument, this value is updated after the function finishes with the position that the query was stored into the buffer. --- hash_query.c | 170 ++++++++++++---------------------------------- pg_stat_monitor.c | 62 +++++++++++++---- pg_stat_monitor.h | 10 ++- 3 files changed, 98 insertions(+), 144 deletions(-) diff --git a/hash_query.c b/hash_query.c index bb684d8..29d0cc8 100644 --- a/hash_query.c +++ b/hash_query.c @@ -25,14 +25,16 @@ static HTAB *pgss_hash; static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size); /* - * Copy all queries from query_buffer[old_bucket_id] to query_buffer[new_bucket_id] - * whose query ids are found in the array 'query_ids', of length 'n_queries'. 
+ * Copy query from src_buffer to dst_buff. + * Use query_id and query_pos to fast locate query in source buffer. + * Store updated query position in the destination buffer into param query_pos. */ -static void copy_queries(unsigned char *query_buffer[], - uint64 new_bucket_id, - uint64 old_bucket_id, - uint64 *query_ids, - size_t n_queries); +static bool copy_query(uint64 bucket_id, + uint64 query_id, + uint64 query_pos, + unsigned char *dst_buf, + unsigned char *src_buf, + size_t *new_query_pos); static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size) @@ -186,29 +188,9 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu pgssEntry *entry = NULL; pgssSharedState *pgss = pgsm_get_ss(); -#define MAX_PENDING_QUERIES 128 - /* - * Variables used to store pending queries from the previous bucket. - * - * We use a linked list to keep a full copy of entries from the hash table - * that must be moved to the new bucket. - * - * We use an array to keep a list of pending query IDs only, the array will - * be used in copy_queries() as a filter of which queries to copy. - * The reason we use a separate array to keep pending queries IDs is that it - * is faster to iterate than the linked list, as following pointers in a list - * almost always make bad use of cpu cache, while a small array of uint64 is - * a good candidate to be stored in L1 cache. - * - * If there are more pending queries than MAX_PENDING_QUERIES then - * we try to dynamically allocate memory for them. - */ + /* Store pending query ids from the previous bucket. */ List *pending_entries = NIL; ListCell *pending_entry; - uint64 pending_query_ids[MAX_PENDING_QUERIES]; - uint64 *pending_query_ids_buf = NULL; - size_t n_pending_queries = 0; - bool out_of_memory = false; if (new_bucket_id != -1) { @@ -222,7 +204,7 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu { /* * Remove all entries if new_bucket_id == -1. 
- * Otherwise remove entry in new_bucket_id if it's finished already. + * Otherwise remove entry in new_bucket_id if it has finished already. */ if (new_bucket_id < 0 || (entry->key.bucket_id == new_bucket_id && @@ -262,63 +244,12 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu /* Add the entry to a list of nodes to be processed later. */ pending_entries = lappend(pending_entries, bkp_entry); - /* Add pending query ID to the array. */ - if (n_pending_queries < MAX_PENDING_QUERIES) - { - pending_query_ids[n_pending_queries] = entry->key.queryid; - ++n_pending_queries; - } - else - { - /* - * No. of pending queries exceeds MAX_PENDING_QUERIES. - * Try to dynamically allocate memory to keep track of pending query ids. - * If allocation fails we manually copy pending query to the next query buffer. - */ - if (!out_of_memory && !pending_query_ids_buf) - { - /* Allocate enough room for query ids. */ - pending_query_ids_buf = malloc(sizeof(uint64) * hash_get_num_entries(pgss_hash)); - if (pending_query_ids_buf != NULL) - memcpy(pending_query_ids_buf, pending_query_ids, n_pending_queries * sizeof(uint64)); - else - out_of_memory = true; - } - - if (!out_of_memory) - { - /* Store pending query id in the dynamic buffer. */ - pending_query_ids_buf[n_pending_queries] = entry->key.queryid; - ++n_pending_queries; - } - else - { - /* No memory, manually copy query from previous buffer. */ - char query_txt[1024]; - - if (read_query(query_buffer[old_bucket_id], old_bucket_id, entry->key.queryid, query_txt) != 0 - || read_query_buffer(old_bucket_id, entry->key.queryid, query_txt) == MAX_QUERY_BUFFER_BUCKET) - { - SaveQueryText(new_bucket_id, entry->key.queryid, query_buffer[new_bucket_id], query_txt, strlen(query_txt)); - } - else - /* There was no space available to store the pending query text. */ - elog(ERROR, "hash_entry_dealloc: Failed to move pending query %lX, %s", - entry->key.queryid, - (PGSM_OVERFLOW_TARGET == OVERFLOW_TARGET_NONE) ? 
- "insufficient shared space for query" : - "I/O error reading query from disk"); - } - } - /* Finally remove the pending query from the expired bucket id. */ entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } } } - Assert(list_length(pending_entries) == n_pending_queries); - /* * Iterate over the list of pending queries in order * to add them back to the hash table with the updated bucket id. @@ -337,19 +268,18 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu new_entry->counters = old_entry->counters; SpinLockInit(&new_entry->mutex); new_entry->encoding = old_entry->encoding; + /* copy query's text from previous bucket to the new one. */ + copy_query(new_bucket_id, + new_entry->key.queryid, /* query id */ + old_entry->query_pos, /* query position in buffer */ + query_buffer[new_bucket_id], /* destination query buffer */ + query_buffer[old_bucket_id], /* source query buffer */ + &new_entry->query_pos); /* position in which query was inserted into destination buffer */ } free(old_entry); } - /* Copy all detected pending queries from previous bucket id to the new one. 
*/ - if (n_pending_queries > 0) { - if (n_pending_queries < MAX_PENDING_QUERIES) - pending_query_ids_buf = pending_query_ids; - - copy_queries(query_buffer, new_bucket_id, old_bucket_id, pending_query_ids_buf, n_pending_queries); - } - list_free(pending_entries); } @@ -381,52 +311,38 @@ IsHashInitialize(void) pgss_hash != NULL); } -static void copy_queries(unsigned char *query_buffer[], - uint64 new_bucket_id, - uint64 old_bucket_id, - uint64 *query_ids, - size_t n_queries) +static bool copy_query(uint64 bucket_id, + uint64 query_id, + uint64 query_pos, + unsigned char *dst_buf, + unsigned char *src_buf, + size_t *new_query_pos) { - bool found; - uint64 query_id = 0; - uint64 query_len = 0; - uint64 rlen = 0; - uint64 buf_len = 0; - unsigned char *src_buffer = query_buffer[old_bucket_id]; - size_t i; + uint64 query_len = 0; + uint64 buf_len = 0; - memcpy(&buf_len, src_buffer, sizeof (uint64)); + memcpy(&buf_len, src_buf, sizeof (uint64)); if (buf_len <= 0) - return; + return false; - rlen = sizeof (uint64); /* Move forwad to skip length bytes */ - while (rlen < buf_len) + /* Try to locate the query directly. 
*/ + if (query_pos != 0 && (query_pos + sizeof(uint64) + sizeof(uint64)) < buf_len) { - found = false; - memcpy(&query_id, &src_buffer[rlen], sizeof (uint64)); /* query id */ - for (i = 0; i < n_queries; ++i) - { - if (query_id == query_ids[i]) - { - found = true; - break; - } - } + if (*(uint64 *)&src_buf[query_pos] != query_id) + return false; - rlen += sizeof (uint64); - if (buf_len <= rlen) - break; + query_pos += sizeof(uint64); - memcpy(&query_len, &src_buffer[rlen], sizeof (uint64)); /* query len */ - rlen += sizeof (uint64); - if (buf_len < rlen + query_len) - break; + memcpy(&query_len, &src_buf[query_pos], sizeof(uint64)); /* query len */ + query_pos += sizeof(uint64); - if (found) { - SaveQueryText(new_bucket_id, query_id, query_buffer[new_bucket_id], - (const char *)&src_buffer[rlen], query_len); - } + if (query_pos + query_len > buf_len) /* avoid reading past buffer's length. */ + return false; - rlen += query_len; + return SaveQueryText(bucket_id, query_id, dst_buf, + (const char *)&src_buf[query_pos], + query_len, new_query_pos); } -} \ No newline at end of file + + return false; +} diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 32334b8..ee63ba7 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1501,6 +1501,8 @@ pgss_store(uint64 queryid, if (!entry) { uint64 prev_qbuf_len; + /* position in which the query's text was inserted into the query buffer. */ + size_t qpos = 0; query_len = strlen(query); if (query_len > PGSM_QUERY_MAX_LEN) @@ -1516,7 +1518,7 @@ pgss_store(uint64 queryid, * original length. 
*/ memcpy(&prev_qbuf_len, pgss_qbuf[bucketid], sizeof(prev_qbuf_len)); - if (!SaveQueryText(bucketid, queryid, pgss_qbuf[bucketid], query, query_len)) + if (!SaveQueryText(bucketid, queryid, pgss_qbuf[bucketid], query, query_len, &qpos)) { LWLockRelease(pgss->lock); elog(DEBUG1, "pg_stat_monitor: insufficient shared space for query."); @@ -1533,6 +1535,7 @@ pgss_store(uint64 queryid, elog(DEBUG1, "pg_stat_monitor: out of memory"); return; } + entry->query_pos = qpos; } if (jstate == NULL) @@ -1689,7 +1692,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, bool is_allowed_role = is_member_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS); #endif - if (read_query(buf, bucketid, queryid, query_txt) == 0) + if (read_query(buf, queryid, query_txt, entry->query_pos) == 0) { int len; len = read_query_buffer(bucketid, queryid, query_txt); @@ -1709,16 +1712,16 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (tmp.state == PGSS_FINISHED) continue; } - if (tmp.info.parentid != UINT64CONST(0)) - { - int len = 0; - if (read_query(buf, bucketid, tmp.info.parentid, parent_query_txt) == 0) - { - len = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt); - if (len != MAX_QUERY_BUFFER_BUCKET) - snprintf(parent_query_txt, 32, "%s", ""); - } - } + if (tmp.info.parentid != UINT64CONST(0)) + { + int len = 0; + if (read_query(buf, tmp.info.parentid, parent_query_txt, 0) == 0) + { + len = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt); + if (len != MAX_QUERY_BUFFER_BUCKET) + snprintf(parent_query_txt, 32, "%s", ""); + } + } /* bucketid at column number 0 */ values[i++] = Int64GetDatumFast(bucketid); @@ -2984,7 +2987,7 @@ intarray_get_datum(int32 arr[], int len) } uint64 -read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char * query) +read_query(unsigned char *buf, uint64 queryid, char * query, size_t pos) { bool found = false; uint64 query_id = 0; @@ -2996,6 +2999,27 @@ read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char 
* query) if (buf_len <= 0) goto exit; + /* If a position hint is given, try to locate the query directly. */ + if (pos != 0 && (pos + sizeof(uint64) + sizeof(uint64)) < buf_len) + { + memcpy(&query_id, &buf[pos], sizeof(uint64)); + if (query_id != queryid) + return 0; + + pos += sizeof(uint64); + + memcpy(&query_len, &buf[pos], sizeof(uint64)); /* query len */ + pos += sizeof(uint64); + + if (pos + query_len > buf_len) /* avoid reading past buffer's length. */ + return 0; + + memcpy(query, &buf[pos], query_len); /* Actual query */ + query[query_len] = '\0'; + + return queryid; + } + rlen = sizeof (uint64); /* Move forwad to skip length bytes */ for(;;) { @@ -3005,6 +3029,7 @@ read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char * query) memcpy(&query_id, &buf[rlen], sizeof (uint64)); /* query id */ if (query_id == queryid) found = true; + rlen += sizeof (uint64); if (buf_len <= rlen) continue; @@ -3034,7 +3059,12 @@ exit: } bool -SaveQueryText(uint64 bucketid, uint64 queryid, unsigned char *buf, const char *query, uint64 query_len) +SaveQueryText(uint64 bucketid, + uint64 queryid, + unsigned char *buf, + const char *query, + uint64 query_len, + size_t *query_pos) { uint64 buf_len = 0; @@ -3059,6 +3089,8 @@ SaveQueryText(uint64 bucketid, uint64 queryid, unsigned char *buf, const char *q } } + *query_pos = buf_len; + memcpy(&buf[buf_len], &queryid, sizeof (uint64)); /* query id */ buf_len += sizeof (uint64); @@ -3293,7 +3325,7 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt) break; } off += buf_len; - if (read_query(buf, bucket_id, queryid, query_txt)) + if (read_query(buf, queryid, query_txt, 0)) break; } if (fd > 0) diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 2eab42b..2ddfe01 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -294,6 +294,7 @@ typedef struct pgssEntry Counters counters; /* the statistics for this query */ int encoding; /* query text encoding */ slock_t mutex; /* protects the counters only */ + 
size_t query_pos; /* query location within query buffer */ } pgssEntry; /* @@ -361,7 +362,12 @@ typedef struct JumbleState /* Links to shared memory state */ -bool SaveQueryText(uint64 bucketid, uint64 queryid, unsigned char *buf, const char *query, uint64 query_len); +bool SaveQueryText(uint64 bucketid, + uint64 queryid, + unsigned char *buf, + const char *query, + uint64 query_len, + size_t *query_pos); /* guc.c */ void init_guc(void); @@ -385,7 +391,7 @@ pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encodin Size hash_memsize(void); int read_query_buffer(int bucket_id, uint64 queryid, char *query_txt); -uint64 read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char * query); +uint64 read_query(unsigned char *buf, uint64 queryid, char * query, size_t pos); pgssQueryEntry* hash_find_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid); pgssQueryEntry* hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid); void pgss_startup(void); From 32a13f8462a94efefc8a073ac6aad0a5486caac5 Mon Sep 17 00:00:00 2001 From: Naeem Akhter Date: Wed, 13 Oct 2021 22:00:41 +0500 Subject: [PATCH 36/45] PG-258: Re-enable Coverall code coverage to pg_stat_monitor. As codecov is not approved by IT due to some known vulnerabilities in the past, reverting back to coverall.io to make sure that at least code coverage is available. 
--- .github/workflows/coverage_test.yml | 97 +++++++++++++++++++++++++++++ README.md | 2 + 2 files changed, 99 insertions(+) create mode 100644 .github/workflows/coverage_test.yml diff --git a/.github/workflows/coverage_test.yml b/.github/workflows/coverage_test.yml new file mode 100644 index 0000000..41429d9 --- /dev/null +++ b/.github/workflows/coverage_test.yml @@ -0,0 +1,97 @@ +name: coverage-test +on: ["push", "pull_request"] + +jobs: + build: + name: coverage-test + runs-on: ubuntu-latest + steps: + - name: Clone postgres repository + uses: actions/checkout@v2 + with: + repository: 'postgres/postgres' + ref: 'REL_13_STABLE' + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + ref: 'master' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev lcov + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + - name: Create pgsql dir + run: mkdir -p /opt/pgsql + + - name: Build postgres + run: | + export PATH="/opt/pgsql/bin:$PATH" + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' \ + '--enable-coverage' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' \ + '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + 
'--libdir=${prefix}/lib/x86_64-linux-gnu' \ + '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' \ + '--with-python' '--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' \ + 'PYTHON=/usr/bin/python3' '--mandir=/usr/share/postgresql/13/man' \ + '--docdir=/usr/share/doc/postgresql-doc-13' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/13' '--bindir=/usr/lib/postgresql/13/bin' \ + '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 2:13-x.focal)' \ + '--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-11' 'CLANG=/usr/bin/clang-11' \ + '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' \ + 'TAR=/bin/tar' 'XSLTPROC=xsltproc --nonet' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + 'build_alias=x86_64-linux-gnu' 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' \ + 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + make + sudo make install + + - name: Start postgresql cluster + run: | + /usr/lib/postgresql/13/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Build pg_stat_monitor + run: | + export PATH="/usr/lib/postgresql/13/bin:$PATH" + sudo cp /usr/lib/postgresql/13/bin/pg_config /usr/bin + make USE_PGXS=1 + sudo make 
USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests & Run code coverage + run: | + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start + make installcheck + make coverage-html + lcov --capture --directory . --output-file coverage/lcov.info + pip install cpp-coveralls + export COVERALLS_REPO_TOKEN="${{ secrets.COVERALL_PG_STAT_MONITOR_TOKEN }}" + coveralls --verbose + working-directory: src/pg_stat_monitor/ + diff --git a/README.md b/README.md index cae7861..636e125 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,8 @@ ![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) ![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/percona/pg_stat_monitor/badge.svg)](https://coveralls.io/github/percona/pg_stat_monitor) + # pg_stat_monitor: Query Performance Tool for PostgreSQL ## Table of Contents From 3b25f395c8c903c0f81a8d1560bb3c39ededc9b4 Mon Sep 17 00:00:00 2001 From: Naeem Akhter Date: Thu, 14 Oct 2021 00:43:55 +0500 Subject: [PATCH 37/45] PG-259: Fixed PGSM build in Test-with-pg13-pgdg-packages GitHub action job. 
--- .github/workflows/pg11test-pgdg-packages.yml | 2 ++ .github/workflows/pg12test-pgdg-packages.yml | 2 ++ .github/workflows/pg13test-pgdg-packages.yml | 2 ++ .github/workflows/pg14test-pgdg-packages.yml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/.github/workflows/pg11test-pgdg-packages.yml b/.github/workflows/pg11test-pgdg-packages.yml index 9126f7c..ad8ef4d 100644 --- a/.github/workflows/pg11test-pgdg-packages.yml +++ b/.github/workflows/pg11test-pgdg-packages.yml @@ -38,6 +38,8 @@ jobs: - name: Build pg_stat_monitor run: | + export PATH="/usr/lib/postgresql/11/bin:$PATH" + sudo cp /usr/lib/postgresql/11/bin/pg_config /usr/bin sudo make USE_PGXS=1 sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ diff --git a/.github/workflows/pg12test-pgdg-packages.yml b/.github/workflows/pg12test-pgdg-packages.yml index 2653ec6..274e082 100644 --- a/.github/workflows/pg12test-pgdg-packages.yml +++ b/.github/workflows/pg12test-pgdg-packages.yml @@ -38,6 +38,8 @@ jobs: - name: Build pg_stat_monitor run: | + export PATH="/usr/lib/postgresql/12/bin:$PATH" + sudo cp /usr/lib/postgresql/12/bin/pg_config /usr/bin sudo make USE_PGXS=1 sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ diff --git a/.github/workflows/pg13test-pgdg-packages.yml b/.github/workflows/pg13test-pgdg-packages.yml index 49e0261..1a718d2 100644 --- a/.github/workflows/pg13test-pgdg-packages.yml +++ b/.github/workflows/pg13test-pgdg-packages.yml @@ -34,6 +34,8 @@ jobs: - name: Build pg_stat_monitor run: | + export PATH="/usr/lib/postgresql/13/bin:$PATH" + sudo cp /usr/lib/postgresql/13/bin/pg_config /usr/bin sudo make USE_PGXS=1 sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ diff --git a/.github/workflows/pg14test-pgdg-packages.yml b/.github/workflows/pg14test-pgdg-packages.yml index 352aac6..ad6bc91 100644 --- a/.github/workflows/pg14test-pgdg-packages.yml +++ b/.github/workflows/pg14test-pgdg-packages.yml @@ -38,6 +38,8 @@ jobs: - name: Build 
pg_stat_monitor run: | + export PATH="/usr/lib/postgresql/14/bin:$PATH" + sudo cp /usr/lib/postgresql/14/bin/pg_config /usr/bin sudo make USE_PGXS=1 sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ From 0e06a4c701f2bdbefe8ffa8386b004de6d0b8fdd Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Thu, 14 Oct 2021 10:24:41 -0300 Subject: [PATCH 38/45] PG-260: Fix display of queries in pg_stat_monitor view. Don't display queries in PARSE or PLAN state, to keep it consistent with previous behavior, this avoids showing intermediate scripts like part of a procedure, such as: $1 $1 := $3 --- pg_stat_monitor.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index ee63ba7..2f51c08 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1712,6 +1712,11 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (tmp.state == PGSS_FINISHED) continue; } + + /* Skip queries such as, $1, $2 := $3, etc. */ + if (tmp.state == PGSS_PARSE || tmp.state == PGSS_PLAN) + continue; + if (tmp.info.parentid != UINT64CONST(0)) { int len = 0; From 9e76f6f961a090b5666be844c2378df1e228c3a6 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Thu, 14 Oct 2021 10:39:25 -0300 Subject: [PATCH 39/45] PG-260: Fix regression tests. The regression tests required some adjustments as they were based on a wrong behavior in pg_stat_monitor that was fixed in the last commits. The problem was that pg_stat_monitor_reset() was not properly clearing the query buffers, as such, some garbage queries were residing in the buffers after calling pg_stat_monitor_reset().
One example of a problem, a query such as "SELECT 1 AS num" and the same query with comments such as: SELECT $1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */ Are evaluated to the same query ID, if a test issue the first query, call pg_stat_monitor_reset() to clear query buffer, then issue the second query with comments, the result in pg_stat_monitor view would still contain the first query without comments, this was leading to tests expecting the wrong output, which is now fixed. --- regression/expected/counters.out | 2 +- regression/expected/state.out | 2 +- regression/expected/tags.out | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/regression/expected/counters.out b/regression/expected/counters.out index c96775e..ebf26f7 100644 --- a/regression/expected/counters.out +++ b/regression/expected/counters.out @@ -68,7 +68,7 @@ end $$; SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; query | calls ---------------------------------------------------------------------------------------------------+------- - SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; | 1000 + SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 1000 SELECT pg_stat_monitor_reset(); | 1 SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 do $$ +| 1 diff --git a/regression/expected/state.out b/regression/expected/state.out index 147054a..b051091 100644 --- a/regression/expected/state.out +++ b/regression/expected/state.out @@ -16,7 +16,7 @@ ERROR: division by zero SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C"; query | state_code | state ----------------------------------------------------------------------------------+------------+--------------------- - SELECT $1 AS num | 3 | FINISHED + SELECT $1 | 3 | FINISHED SELECT 1/0; | 4 | FINISHED WITH ERROR SELECT pg_stat_monitor_reset(); | 3 | FINISHED SELECT query, state_code, 
state FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 2 | ACTIVE diff --git a/regression/expected/tags.out b/regression/expected/tags.out index 9da2b13..dffe468 100644 --- a/regression/expected/tags.out +++ b/regression/expected/tags.out @@ -12,11 +12,11 @@ SELECT 1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */; (1 row) SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | comments --------------------------------------------------------------------------+------------------------------------------------------ - SELECT $1 AS num | { "application", psql_app, "real_ip", 192.168.1.3) - SELECT pg_stat_monitor_reset(); | - SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; | + query | comments +---------------------------------------------------------------------------+------------------------------------------------------ + SELECT $1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */ | { "application", psql_app, "real_ip", 192.168.1.3) + SELECT pg_stat_monitor_reset(); | + SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; | (3 rows) SELECT pg_stat_monitor_reset(); From bf8c93b1855a5983e94c8830424b6f814c6b513e Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Fri, 15 Oct 2021 11:56:28 -0300 Subject: [PATCH 40/45] PG-261: Fix regression tests on distribution packages. The code added in pgss_store() to handle an assertion failure when GetUserId() was being called introduced a problem with regression tests in some builds, specifically our PG11 and PG12 distributions for Ubuntu. The problem was detected when calling some json functions that would trigger parallel workers, to solve the problem now we check if our hooks are being called by parallel workers, in this case we can avoid doing work, it also fixes the issue mentioned above as we won't call GetUserId() anymore in an invalid context. 
--- pg_stat_monitor.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 2f51c08..a045cc4 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -14,7 +14,9 @@ * *------------------------------------------------------------------------- */ + #include "postgres.h" +#include "access/parallel.h" #include #ifdef BENCHMARK #include /* clock() */ @@ -367,6 +369,9 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) if (!IsSystemInitialized()) return; + if (IsParallelWorker()) + return; + /* * Clear queryId for prepared statements related utility, as those will * inherit from the underlying statement's one (except DEALLOCATE which is @@ -423,6 +428,9 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) if (!IsSystemInitialized()) return; + if (IsParallelWorker()) + return; + /* * Utility statements get queryId zero. We do this even in cases where * the statement contains an optimizable statement for which a queryId @@ -485,6 +493,9 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) else standard_ExecutorStart(queryDesc, eflags); + if (IsParallelWorker()) + return; + /* * If query has queryId zero, don't track it. This prevents double * counting of optimizable statements that are directly contained in @@ -655,7 +666,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) MemoryContextSwitchTo(mct); } - if (queryId != UINT64CONST(0) && queryDesc->totaltime) + if (queryId != UINT64CONST(0) && queryDesc->totaltime && !IsParallelWorker()) { /* * Make sure stats accumulation is done. 
(Note: it's okay if several @@ -770,7 +781,7 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par { PlannedStmt *result; - if (PGSM_TRACK_PLANNING && query_string && parse->queryId != UINT64CONST(0)) + if (PGSM_TRACK_PLANNING && query_string && parse->queryId != UINT64CONST(0) && !IsParallelWorker()) { PlanInfo plan_info; instr_time start; @@ -942,7 +953,7 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, if (PGSM_TRACK_UTILITY && !IsA(parsetree, ExecuteStmt) && !IsA(parsetree, PrepareStmt) && - !IsA(parsetree, DeallocateStmt)) + !IsA(parsetree, DeallocateStmt) && !IsParallelWorker()) { instr_time start; instr_time duration; @@ -1453,7 +1464,6 @@ pgss_store(uint64 queryid, uint64 bucketid; uint64 prev_bucket_id; uint64 userid; - int con; uint64 planid; uint64 appid; char comments[512] = ""; @@ -1468,10 +1478,7 @@ pgss_store(uint64 queryid, return; Assert(query != NULL); - if (kind == PGSS_ERROR) - GetUserIdAndSecContext((unsigned int *)&userid, &con); - else - userid = GetUserId(); + userid = GetUserId(); application_name_len = pg_get_application_name(application_name); planid = plan_info ? 
plan_info->planid: 0; @@ -3255,6 +3262,9 @@ pgsm_emit_log_hook(ErrorData *edata) if (!IsSystemInitialized() || edata == NULL) goto exit; + if (IsParallelWorker()) + return; + if ((edata->elevel == ERROR || edata->elevel == WARNING || edata->elevel == INFO || edata->elevel == DEBUG1)) { uint64 queryid = 0; From 350ef723abee8d67ccd8c9cb0aeea1d3ebd15e0c Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Sat, 16 Oct 2021 07:58:14 +0500 Subject: [PATCH 41/45] Update README.md --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index da2235a..8beb28d 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -![pg11-test](https://github.com/percona/pg_stat_monitor/workflows/pg11-test/badge.svg) -![pg12-test](https://github.com/percona/pg_stat_monitor/workflows/pg12-test/badge.svg) -![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) -![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) -![pg11package-test](https://github.com/percona/pg_stat_monitor/workflows/pg11package-test/badge.svg) -![pg12package-test](https://github.com/percona/pg_stat_monitor/workflows/pg12package-test/badge.svg) -![pg13package-test](https://github.com/percona/pg_stat_monitor/workflows/pg13package-test/badge.svg) +![PostgreSQL-11](https://github.com/percona/pg_stat_monitor/workflows/pg11-test/badge.svg) +![PostgreSQL-12](https://github.com/percona/pg_stat_monitor/workflows/pg12-test/badge.svg) +![PostgreSQL-13](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) +![PostgreSQL-14](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) +![PostgreSQL-11-Package](https://github.com/percona/pg_stat_monitor/workflows/pg11package-test/badge.svg) +![PostgreSQL-12-Packages](https://github.com/percona/pg_stat_monitor/workflows/pg12package-test/badge.svg) 
+![PostgreSQL-13-Packages](https://github.com/percona/pg_stat_monitor/workflows/pg13package-test/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/percona/pg_stat_monitor/badge.svg)](https://coveralls.io/github/percona/pg_stat_monitor) From a9a36905f22a64c18d4cc7774afbd31b1ab22de5 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 18 Oct 2021 11:41:37 -0300 Subject: [PATCH 42/45] PG-262: Fix comment extraction on queries. The regular expression was updated to properly capture comments in SQL in the form /* */. The previous regex was capturing everything from /* until the last */ because regex are greedy, this was presenting problems if an input query has something like: SELECT /* comment 1 */ field /* comment 2 */ from FOO; As such, the previous regex would capture anything between /* comment 1 and comment 2 */, the result would be: /* comment 1 field comment 2*/. Multiline comments are also captured. Multiple comments in one query are now stored in the pg_stat_monitor comments field in the form: /* Comment 1 */, ... , /* Comment N */. --- pg_stat_monitor.c | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index a045cc4..4f9a504 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -251,7 +251,7 @@ _PG_init(void) /* * Compile regular expression for extracting out query comments only once.
*/ - rc = regcomp(&preg_query_comments, "/\\*.*\\*/", 0); + rc = regcomp(&preg_query_comments, "/\\*([^*]|[\r\n]|(\\*+([^*/]|[\r\n])))*\\*+/", REG_EXTENDED); if (rc != 0) { elog(ERROR, "pg_stat_monitor: query comments regcomp() failed, return code=(%d)\n", rc); @@ -3461,12 +3461,37 @@ extract_query_comments(const char *query, char *comments, size_t max_len) int rc; size_t nmatch = 1; regmatch_t pmatch; + regoff_t comment_len, total_len = 0; + const char *s = query; - rc = regexec(&preg_query_comments, query, nmatch, &pmatch, 0); - if (rc != 0) - return; + while (total_len < max_len) + { + rc = regexec(&preg_query_comments, s, nmatch, &pmatch, 0); + if (rc != 0) + break; - snprintf(comments, max_len, "%.*s", pmatch.rm_eo - pmatch.rm_so -4, &query[pmatch.rm_so + 2]); + comment_len = pmatch.rm_eo - pmatch.rm_so; + + if (total_len + comment_len > max_len) + break; /* TODO: log error in error view, insufficient space for comment. */ + + total_len += comment_len; + + /* Not 1st iteration, append ", " before next comment. */ + if (s != query) + { + if (total_len + 2 > max_len) + break; /* TODO: log error in error view, insufficient space for ", " + comment. */ + + memcpy(comments, ", ", 2); + comments += 2; + total_len += 2; + } + + memcpy(comments, s + pmatch.rm_so, comment_len); + comments += comment_len; + s += pmatch.rm_eo; + } } #if PG_VERSION_NUM < 140000 From c390f24f5cb7c648a7b5c7a78889b980512aadbe Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 18 Oct 2021 11:51:49 -0300 Subject: [PATCH 43/45] PG-262: Fix tags regression test. The test was updated to reflect the new output format for query comments. 
--- regression/expected/tags.out | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/regression/expected/tags.out b/regression/expected/tags.out index dffe468..b2cda90 100644 --- a/regression/expected/tags.out +++ b/regression/expected/tags.out @@ -12,9 +12,9 @@ SELECT 1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */; (1 row) SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | comments ----------------------------------------------------------------------------+------------------------------------------------------ - SELECT $1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */ | { "application", psql_app, "real_ip", 192.168.1.3) + query | comments +---------------------------------------------------------------------------+---------------------------------------------------------- + SELECT $1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */ | /* { "application", psql_app, "real_ip", 192.168.1.3) */ SELECT pg_stat_monitor_reset(); | SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; | (3 rows) From 693838c979aa3e40ab41ec4b2acaa526ccae201a Mon Sep 17 00:00:00 2001 From: Ibrar Ahmed Date: Fri, 22 Oct 2021 16:27:36 +0000 Subject: [PATCH 44/45] PG-263: Bump version to 1.0.0 - Beta2. 
--- pg_stat_monitor.c | 2 +- regression/expected/version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 4f9a504..41c23c8 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -26,7 +26,7 @@ PG_MODULE_MAGIC; -#define BUILD_VERSION "0.9.2-beta1" +#define BUILD_VERSION "1.0.0 - Beta 2" #define PG_STAT_STATEMENTS_COLS 53 /* maximum of above */ #define PGSM_TEXT_FILE "/tmp/pg_stat_monitor_query" diff --git a/regression/expected/version.out b/regression/expected/version.out index 7c4640e..bdda4d2 100644 --- a/regression/expected/version.out +++ b/regression/expected/version.out @@ -2,7 +2,7 @@ CREATE EXTENSION pg_stat_monitor; SELECT pg_stat_monitor_version(); pg_stat_monitor_version ------------------------- - 0.9.2-beta1 + 1.0.0 - Beta 2 (1 row) DROP EXTENSION pg_stat_monitor; From ac7aa57995ea6c13e0a77f23f842f49593bced32 Mon Sep 17 00:00:00 2001 From: Evgeniy Patlan Date: Mon, 1 Nov 2021 14:49:35 +0200 Subject: [PATCH 45/45] PG-264 fix version --- pg_stat_monitor.c | 2 +- regression/expected/version.out | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 41c23c8..5fb38eb 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -26,7 +26,7 @@ PG_MODULE_MAGIC; -#define BUILD_VERSION "1.0.0 - Beta 2" +#define BUILD_VERSION "1.0.0-beta-2" #define PG_STAT_STATEMENTS_COLS 53 /* maximum of above */ #define PGSM_TEXT_FILE "/tmp/pg_stat_monitor_query" diff --git a/regression/expected/version.out b/regression/expected/version.out index bdda4d2..77ff028 100644 --- a/regression/expected/version.out +++ b/regression/expected/version.out @@ -2,7 +2,7 @@ CREATE EXTENSION pg_stat_monitor; SELECT pg_stat_monitor_version(); pg_stat_monitor_version ------------------------- - 1.0.0 - Beta 2 + 1.0.0-beta-2 (1 row) DROP EXTENSION pg_stat_monitor;