PG-456: Running pgindent to make source PostgreSQL compatible. (#269)

PG-456: Running pgindent to make source indentation/spacing PostgreSQL compatible.

PostgreSQL runs pgindent from time to time to keep the source code compliant
with the PostgreSQL style guide; it has been a long time since we last did that here.
This commit fixes a large number of indentation and spacing issues.
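For reference, the snippet below is a minimal sketch (not taken from the patch; the struct, function, and value names are illustrative) of the kind of reformatting pgindent applies and that makes up the bulk of this commit: the return type of a function definition moves to its own line, braces of blocks and of compound initializers go on their own lines, and declarations of several variables are split one per line.

#include <stdio.h>

/* Illustrative stand-in for a GUC descriptor; not the real GucVariable. */
typedef struct GucExample
{
	const char *guc_name;
	int			guc_default;
} GucExample;

/*
 * Before pgindent this would typically read
 * "static int demo(void) { int i = 0, j; conf = (GucExample) { ... }; ... }"
 * with the return type, braces, and declarations packed together.
 */
static int
demo(void)
{
	int			i = 0,
				j;
	GucExample	conf;

	conf = (GucExample)
	{
		.guc_name = "pg_stat_monitor.pgsm_max",
		.guc_default = 100
	};

	for (j = 0; j < conf.guc_default; ++j)
		i++;
	return i;
}

int
main(void)
{
	printf("%s: %d\n", "demo", demo());
	return 0;
}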

Co-authored-by: Hamid Akhtar <hamid.akhtar@gmail.com>
Ibrar Ahmed 2022-06-29 00:42:40 +05:00 committed by GitHub
parent 926eade1eb
commit a9187117f9
5 changed files with 1287 additions and 1151 deletions

guc.c
View File

@ -34,9 +34,11 @@ static bool check_histogram_max(int *newval, void **extra, GucSource source);
void void
init_guc(void) init_guc(void)
{ {
int i = 0, j; int i = 0,
j;
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_max", .guc_name = "pg_stat_monitor.pgsm_max",
.guc_desc = "Sets the maximum size of shared memory in (MB) used for statement's metadata tracked by pg_stat_monitor.", .guc_desc = "Sets the maximum size of shared memory in (MB) used for statement's metadata tracked by pg_stat_monitor.",
.guc_default = 100, .guc_default = 100,
@ -48,7 +50,8 @@ init_guc(void)
}; };
DefineIntGUC(&conf[i++]); DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_query_max_len", .guc_name = "pg_stat_monitor.pgsm_query_max_len",
.guc_desc = "Sets the maximum length of query.", .guc_desc = "Sets the maximum length of query.",
.guc_default = 2048, .guc_default = 2048,
@ -60,7 +63,8 @@ init_guc(void)
}; };
DefineIntGUC(&conf[i++]); DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_track_utility", .guc_name = "pg_stat_monitor.pgsm_track_utility",
.guc_desc = "Selects whether utility commands are tracked.", .guc_desc = "Selects whether utility commands are tracked.",
.guc_default = 1, .guc_default = 1,
@ -72,7 +76,8 @@ init_guc(void)
}; };
DefineBoolGUC(&conf[i++]); DefineBoolGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_normalized_query", .guc_name = "pg_stat_monitor.pgsm_normalized_query",
.guc_desc = "Selects whether save query in normalized format.", .guc_desc = "Selects whether save query in normalized format.",
.guc_default = 0, .guc_default = 0,
@ -84,7 +89,8 @@ init_guc(void)
}; };
DefineBoolGUC(&conf[i++]); DefineBoolGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_max_buckets", .guc_name = "pg_stat_monitor.pgsm_max_buckets",
.guc_desc = "Sets the maximum number of buckets.", .guc_desc = "Sets the maximum number of buckets.",
.guc_default = 10, .guc_default = 10,
@ -96,7 +102,8 @@ init_guc(void)
}; };
DefineIntGUC(&conf[i++]); DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_bucket_time", .guc_name = "pg_stat_monitor.pgsm_bucket_time",
.guc_desc = "Sets the time in seconds per bucket.", .guc_desc = "Sets the time in seconds per bucket.",
.guc_default = 60, .guc_default = 60,
@ -108,7 +115,8 @@ init_guc(void)
}; };
DefineIntGUC(&conf[i++]); DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_histogram_min", .guc_name = "pg_stat_monitor.pgsm_histogram_min",
.guc_desc = "Sets the time in millisecond.", .guc_desc = "Sets the time in millisecond.",
.guc_default = 0, .guc_default = 0,
@ -120,7 +128,8 @@ init_guc(void)
}; };
DefineIntGUCWithCheck(&conf[i++], check_histogram_min); DefineIntGUCWithCheck(&conf[i++], check_histogram_min);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_histogram_max", .guc_name = "pg_stat_monitor.pgsm_histogram_max",
.guc_desc = "Sets the time in millisecond.", .guc_desc = "Sets the time in millisecond.",
.guc_default = 100000, .guc_default = 100000,
@ -132,7 +141,8 @@ init_guc(void)
}; };
DefineIntGUCWithCheck(&conf[i++], check_histogram_max); DefineIntGUCWithCheck(&conf[i++], check_histogram_max);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_histogram_buckets", .guc_name = "pg_stat_monitor.pgsm_histogram_buckets",
.guc_desc = "Sets the maximum number of histogram buckets", .guc_desc = "Sets the maximum number of histogram buckets",
.guc_default = 10, .guc_default = 10,
@ -144,7 +154,8 @@ init_guc(void)
}; };
DefineIntGUC(&conf[i++]); DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_query_shared_buffer", .guc_name = "pg_stat_monitor.pgsm_query_shared_buffer",
.guc_desc = "Sets the maximum size of shared memory in (MB) used for query tracked by pg_stat_monitor.", .guc_desc = "Sets the maximum size of shared memory in (MB) used for query tracked by pg_stat_monitor.",
.guc_default = 20, .guc_default = 20,
@ -156,7 +167,8 @@ init_guc(void)
}; };
DefineIntGUC(&conf[i++]); DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_overflow_target", .guc_name = "pg_stat_monitor.pgsm_overflow_target",
.guc_desc = "Sets the overflow target for pg_stat_monitor", .guc_desc = "Sets the overflow target for pg_stat_monitor",
.guc_default = 1, .guc_default = 1,
@ -168,7 +180,8 @@ init_guc(void)
}; };
DefineIntGUC(&conf[i++]); DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_enable_query_plan", .guc_name = "pg_stat_monitor.pgsm_enable_query_plan",
.guc_desc = "Enable/Disable query plan monitoring", .guc_desc = "Enable/Disable query plan monitoring",
.guc_default = 0, .guc_default = 0,
@ -180,7 +193,8 @@ init_guc(void)
}; };
DefineBoolGUC(&conf[i++]); DefineBoolGUC(&conf[i++]);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_track", .guc_name = "pg_stat_monitor.pgsm_track",
.guc_desc = "Selects which statements are tracked by pg_stat_monitor.", .guc_desc = "Selects which statements are tracked by pg_stat_monitor.",
.n_options = 3, .n_options = 3,
@ -191,12 +205,14 @@ init_guc(void)
.guc_unit = 0, .guc_unit = 0,
.guc_value = &PGSM_TRACK .guc_value = &PGSM_TRACK
}; };
for (j = 0; j < conf[i].n_options; ++j) { for (j = 0; j < conf[i].n_options; ++j)
{
strlcpy(conf[i].guc_options[j], track_options[j].name, sizeof(conf[i].guc_options[j])); strlcpy(conf[i].guc_options[j], track_options[j].name, sizeof(conf[i].guc_options[j]));
} }
DefineEnumGUC(&conf[i++], track_options); DefineEnumGUC(&conf[i++], track_options);
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_extract_comments", .guc_name = "pg_stat_monitor.pgsm_extract_comments",
.guc_desc = "Enable/Disable extracting comments from queries.", .guc_desc = "Enable/Disable extracting comments from queries.",
.guc_default = 0, .guc_default = 0,
@ -209,7 +225,8 @@ init_guc(void)
DefineBoolGUC(&conf[i++]); DefineBoolGUC(&conf[i++]);
#if PG_VERSION_NUM >= 130000 #if PG_VERSION_NUM >= 130000
conf[i] = (GucVariable) { conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_track_planning", .guc_name = "pg_stat_monitor.pgsm_track_planning",
.guc_desc = "Selects whether planning statistics are tracked.", .guc_desc = "Selects whether planning statistics are tracked.",
.guc_default = 0, .guc_default = 0,
@ -223,7 +240,8 @@ init_guc(void)
#endif #endif
} }
static void DefineIntGUCWithCheck(GucVariable *conf, GucIntCheckHook check) static void
DefineIntGUCWithCheck(GucVariable * conf, GucIntCheckHook check)
{ {
conf->type = PGC_INT; conf->type = PGC_INT;
DefineCustomIntVariable(conf->guc_name, DefineCustomIntVariable(conf->guc_name,
@ -285,7 +303,8 @@ get_conf(int i)
return &conf[i]; return &conf[i];
} }
static bool check_histogram_min(int *newval, void **extra, GucSource source) static bool
check_histogram_min(int *newval, void **extra, GucSource source)
{ {
/* /*
* During module initialization PGSM_HISTOGRAM_MIN is initialized before * During module initialization PGSM_HISTOGRAM_MIN is initialized before
@ -294,7 +313,8 @@ static bool check_histogram_min(int *newval, void **extra, GucSource source)
return (PGSM_HISTOGRAM_MAX == 0 || *newval < PGSM_HISTOGRAM_MAX); return (PGSM_HISTOGRAM_MAX == 0 || *newval < PGSM_HISTOGRAM_MAX);
} }
static bool check_histogram_max(int *newval, void **extra, GucSource source) static bool
check_histogram_max(int *newval, void **extra, GucSource source)
{ {
return (*newval > PGSM_HISTOGRAM_MIN); return (*newval > PGSM_HISTOGRAM_MIN);
} }

View File

@ -29,6 +29,7 @@ static HTAB*
hash_init(const char *hash_name, int key_size, int entry_size, int hash_size) hash_init(const char *hash_name, int key_size, int entry_size, int hash_size)
{ {
HASHCTL info; HASHCTL info;
memset(&info, 0, sizeof(info)); memset(&info, 0, sizeof(info));
info.keysize = key_size; info.keysize = key_size;
info.entrysize = entry_size; info.entrysize = entry_size;
@ -185,15 +186,19 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
while ((entry = hash_seq_search(&hash_seq)) != NULL) while ((entry = hash_seq_search(&hash_seq)) != NULL)
{ {
/* /*
* Remove all entries if new_bucket_id == -1. * Remove all entries if new_bucket_id == -1. Otherwise remove entry
* Otherwise remove entry in new_bucket_id if it has finished already. * in new_bucket_id if it has finished already.
*/ */
if (new_bucket_id < 0 || if (new_bucket_id < 0 ||
(entry->key.bucket_id == new_bucket_id && (entry->key.bucket_id == new_bucket_id &&
(entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR))) (entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR)))
{ {
if (new_bucket_id == -1) { if (new_bucket_id == -1)
/* pg_stat_monitor_reset(), remove entry from query hash table too. */ {
/*
* pg_stat_monitor_reset(), remove entry from query hash table
* too.
*/
hash_search(pgss_query_hash, &(entry->key.queryid), HASH_REMOVE, NULL); hash_search(pgss_query_hash, &(entry->key.queryid), HASH_REMOVE, NULL);
} }
@ -201,11 +206,10 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
} }
/* /*
* If we detect a pending query residing in the previous bucket id, * If we detect a pending query residing in the previous bucket id, we
* we add it to a list of pending elements to be moved to the new * add it to a list of pending elements to be moved to the new bucket
* bucket id. * id. Can't update the hash table while iterating it inside this
* Can't update the hash table while iterating it inside this loop, * loop, as this may introduce all sort of problems.
* as this may introduce all sort of problems.
*/ */
if (old_bucket_id != -1 && entry->key.bucket_id == old_bucket_id) if (old_bucket_id != -1 && entry->key.bucket_id == old_bucket_id)
{ {
@ -214,18 +218,22 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
entry->counters.state == PGSS_EXEC) entry->counters.state == PGSS_EXEC)
{ {
pgssEntry *bkp_entry = malloc(sizeof(pgssEntry)); pgssEntry *bkp_entry = malloc(sizeof(pgssEntry));
if (!bkp_entry) if (!bkp_entry)
{ {
elog(DEBUG1, "hash_entry_dealloc: out of memory"); elog(DEBUG1, "hash_entry_dealloc: out of memory");
/* /*
* No memory, If the entry has calls > 1 then we change the state to finished, * No memory, If the entry has calls > 1 then we change
* as the pending query will likely finish execution during the new bucket * the state to finished, as the pending query will likely
* time window. The pending query will vanish in this case, can't list it * finish execution during the new bucket time window. The
* pending query will vanish in this case, can't list it
* until it completes. * until it completes.
* *
* If there is only one call to the query and it's pending, remove the * If there is only one call to the query and it's
* entry from the previous bucket and allow it to finish in the new bucket, * pending, remove the entry from the previous bucket and
* in order to avoid the query living in the old bucket forever. * allow it to finish in the new bucket, in order to avoid
* the query living in the old bucket forever.
*/ */
if (entry->counters.calls.calls > 1) if (entry->counters.calls.calls > 1)
entry->counters.state = PGSS_FINISHED; entry->counters.state = PGSS_FINISHED;
@ -244,14 +252,16 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
pending_entries = lappend(pending_entries, bkp_entry); pending_entries = lappend(pending_entries, bkp_entry);
/* /*
* If the entry has calls > 1 then we change the state to finished in * If the entry has calls > 1 then we change the state to
* the previous bucket, as the pending query will likely finish execution * finished in the previous bucket, as the pending query will
* during the new bucket time window. Can't remove it from the previous bucket * likely finish execution during the new bucket time window.
* as it may have many calls and we would lose the query statistics. * Can't remove it from the previous bucket as it may have
* many calls and we would lose the query statistics.
* *
* If there is only one call to the query and it's pending, remove the entry * If there is only one call to the query and it's pending,
* from the previous bucket and allow it to finish in the new bucket, * remove the entry from the previous bucket and allow it to
* in order to avoid the query living in the old bucket forever. * finish in the new bucket, in order to avoid the query
* living in the old bucket forever.
*/ */
if (entry->counters.calls.calls > 1) if (entry->counters.calls.calls > 1)
entry->counters.state = PGSS_FINISHED; entry->counters.state = PGSS_FINISHED;
@ -262,10 +272,11 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
} }
/* /*
* Iterate over the list of pending queries in order * Iterate over the list of pending queries in order to add them back to
* to add them back to the hash table with the updated bucket id. * the hash table with the updated bucket id.
*/ */
foreach (pending_entry, pending_entries) { foreach(pending_entry, pending_entries)
{
bool found = false; bool found = false;
pgssEntry *new_entry; pgssEntry *new_entry;
pgssEntry *old_entry = (pgssEntry *) lfirst(pending_entry); pgssEntry *old_entry = (pgssEntry *) lfirst(pending_entry);

View File

@ -73,6 +73,7 @@ static int num_relations; /* Number of relation in the query */
static bool system_init = false; static bool system_init = false;
static struct rusage rusage_start; static struct rusage rusage_start;
static struct rusage rusage_end; static struct rusage rusage_end;
/* Query buffer, store queries' text. */ /* Query buffer, store queries' text. */
static unsigned char *pgss_qbuf = NULL; static unsigned char *pgss_qbuf = NULL;
static char *pgss_explain(QueryDesc *queryDesc); static char *pgss_explain(QueryDesc *queryDesc);
@ -99,6 +100,7 @@ static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL; static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
static ProcessUtility_hook_type prev_ProcessUtility = NULL; static ProcessUtility_hook_type prev_ProcessUtility = NULL;
static emit_log_hook_type prev_emit_log_hook = NULL; static emit_log_hook_type prev_emit_log_hook = NULL;
DECLARE_HOOK(void pgsm_emit_log_hook, ErrorData *edata); DECLARE_HOOK(void pgsm_emit_log_hook, ErrorData *edata);
static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook = NULL; static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook = NULL;
@ -145,6 +147,7 @@ DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryStri
static uint64 pgss_hash_string(const char *str, int len); static uint64 pgss_hash_string(const char *str, int len);
#else #else
static void BufferUsageAccumDiff(BufferUsage *bufusage, BufferUsage *pgBufferUsage, BufferUsage *bufusage_start); static void BufferUsageAccumDiff(BufferUsage *bufusage, BufferUsage *pgBufferUsage, BufferUsage *bufusage_start);
DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ParamListInfo params, ProcessUtilityContext context, ParamListInfo params,
QueryEnvironment *queryEnv, QueryEnvironment *queryEnv,
@ -184,12 +187,12 @@ static void JumbleQuery(JumbleState *jstate, Query *query);
static void JumbleRangeTable(JumbleState *jstate, List *rtable, CmdType cmd_type); static void JumbleRangeTable(JumbleState *jstate, List *rtable, CmdType cmd_type);
static void JumbleExpr(JumbleState *jstate, Node *node); static void JumbleExpr(JumbleState *jstate, Node *node);
static void RecordConstLocation(JumbleState *jstate, int location); static void RecordConstLocation(JumbleState *jstate, int location);
/* /*
* Given a possibly multi-statement source string, confine our attention to the * Given a possibly multi-statement source string, confine our attention to the
* relevant part of the string. * relevant part of the string.
*/ */
static const char * static const char *CleanQuerytext(const char *query, int *location, int *len);
CleanQuerytext(const char *query, int *location, int *len);
#endif #endif
static char *generate_normalized_query(JumbleState *jstate, const char *query, static char *generate_normalized_query(JumbleState *jstate, const char *query,
@ -205,12 +208,14 @@ static uint64 get_query_id(JumbleState *jstate, Query *query);
/* Daniel J. Bernstein's hash algorithm: see http://www.cse.yorku.ca/~oz/hash.html */ /* Daniel J. Bernstein's hash algorithm: see http://www.cse.yorku.ca/~oz/hash.html */
static uint64 djb2_hash(unsigned char *str, size_t len); static uint64 djb2_hash(unsigned char *str, size_t len);
/* Same as above, but stores the calculated string length into *out_len (small optimization) */ /* Same as above, but stores the calculated string length into *out_len (small optimization) */
static uint64 djb2_hash_str(unsigned char *str, int *out_len); static uint64 djb2_hash_str(unsigned char *str, int *out_len);
/* /*
* Module load callback * Module load callback
*/ */
// cppcheck-suppress unusedFunction /* cppcheck-suppress unusedFunction */
void void
_PG_init(void) _PG_init(void)
{ {
@ -218,6 +223,7 @@ _PG_init(void)
char file_name[1024]; char file_name[1024];
elog(DEBUG2, "pg_stat_monitor: %s()", __FUNCTION__); elog(DEBUG2, "pg_stat_monitor: %s()", __FUNCTION__);
/* /*
* In order to create our shared memory area, we have to be loaded via * In order to create our shared memory area, we have to be loaded via
* shared_preload_libraries. If not, fall out without hooking into any of * shared_preload_libraries. If not, fall out without hooking into any of
@ -233,6 +239,7 @@ _PG_init(void)
init_guc(); init_guc();
#if PG_VERSION_NUM >= 140000 #if PG_VERSION_NUM >= 140000
/* /*
* Inform the postmaster that we want to enable query_id calculation if * Inform the postmaster that we want to enable query_id calculation if
* compute_query_id is set to auto. * compute_query_id is set to auto.
@ -296,7 +303,7 @@ _PG_init(void)
/* /*
* Module unload callback * Module unload callback
*/ */
// cppcheck-suppress unusedFunction /* cppcheck-suppress unusedFunction */
void void
_PG_fini(void) _PG_fini(void)
{ {
@ -346,8 +353,10 @@ static void
pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query, JumbleState *jstate) pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query, JumbleState *jstate)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_post_parse_analyze(pstate, query, jstate); pgss_post_parse_analyze(pstate, query, jstate);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed); update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed);
} }
#endif #endif
@ -409,8 +418,10 @@ static void
pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query) pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_post_parse_analyze(pstate, query); pgss_post_parse_analyze(pstate, query);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed); update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed);
} }
#endif #endif
@ -477,8 +488,10 @@ static void
pgss_ExecutorStart_benchmark(QueryDesc *queryDesc, int eflags) pgss_ExecutorStart_benchmark(QueryDesc *queryDesc, int eflags)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_ExecutorStart(queryDesc, eflags); pgss_ExecutorStart(queryDesc, eflags);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_EXECUTORSTART, elapsed); update_hook_stats(STATS_PGSS_EXECUTORSTART, elapsed);
} }
#endif #endif
@ -544,8 +557,10 @@ pgss_ExecutorRun_benchmark(QueryDesc *queryDesc, ScanDirection direction, uint64
bool execute_once) bool execute_once)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_ExecutorRun(queryDesc, direction, count, execute_once); pgss_ExecutorRun(queryDesc, direction, count, execute_once);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_EXECUTORUN, elapsed); update_hook_stats(STATS_PGSS_EXECUTORUN, elapsed);
} }
#endif #endif
@ -585,8 +600,10 @@ static void
pgss_ExecutorFinish_benchmark(QueryDesc *queryDesc) pgss_ExecutorFinish_benchmark(QueryDesc *queryDesc)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_ExecutorFinish(queryDesc); pgss_ExecutorFinish(queryDesc);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_EXECUTORFINISH, elapsed); update_hook_stats(STATS_PGSS_EXECUTORFINISH, elapsed);
} }
#endif #endif
@ -639,8 +656,10 @@ static void
pgss_ExecutorEnd_benchmark(QueryDesc *queryDesc) pgss_ExecutorEnd_benchmark(QueryDesc *queryDesc)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_ExecutorEnd(queryDesc); pgss_ExecutorEnd(queryDesc);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_EXECUTOREND, elapsed); update_hook_stats(STATS_PGSS_EXECUTOREND, elapsed);
} }
#endif #endif
@ -660,6 +679,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
if (queryDesc->operation == CMD_SELECT && PGSM_QUERY_PLAN) if (queryDesc->operation == CMD_SELECT && PGSM_QUERY_PLAN)
{ {
MemoryContext mct = MemoryContextSwitchTo(TopMemoryContext); MemoryContext mct = MemoryContextSwitchTo(TopMemoryContext);
plan_info.plan_len = snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc)); plan_info.plan_len = snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc));
plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char *) plan_info.plan_text, plan_info.plan_len, 0)); plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char *) plan_info.plan_text, plan_info.plan_len, 0));
plan_ptr = &plan_info; plan_ptr = &plan_info;
@ -711,8 +731,10 @@ pgss_ExecutorCheckPerms_benchmark(List *rt, bool abort)
{ {
bool ret; bool ret;
double start_time = (double) clock(); double start_time = (double) clock();
ret = pgss_ExecutorCheckPerms(rt, abort); ret = pgss_ExecutorCheckPerms(rt, abort);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_EXECUTORCHECKPERMS, elapsed); update_hook_stats(STATS_PGSS_EXECUTORCHECKPERMS, elapsed);
return ret; return ret;
} }
@ -731,12 +753,14 @@ pgss_ExecutorCheckPerms(List *rt, bool abort)
foreach(lr, rt) foreach(lr, rt)
{ {
RangeTblEntry *rte = lfirst(lr); RangeTblEntry *rte = lfirst(lr);
if (rte->rtekind != RTE_RELATION) if (rte->rtekind != RTE_RELATION)
continue; continue;
if (i < REL_LST) if (i < REL_LST)
{ {
bool found = false; bool found = false;
for (j = 0; j < i; j++) for (j = 0; j < i; j++)
{ {
if (list_oid[j] == rte->relid) if (list_oid[j] == rte->relid)
@ -747,6 +771,7 @@ pgss_ExecutorCheckPerms(List *rt, bool abort)
{ {
char *namespace_name; char *namespace_name;
char *relation_name; char *relation_name;
list_oid[j] = rte->relid; list_oid[j] = rte->relid;
namespace_name = get_namespace_name(get_rel_namespace(rte->relid)); namespace_name = get_namespace_name(get_rel_namespace(rte->relid));
relation_name = get_rel_name(rte->relid); relation_name = get_rel_name(rte->relid);
@ -772,8 +797,10 @@ pgss_planner_hook_benchmark(Query *parse, const char *query_string, int cursorOp
{ {
PlannedStmt *ret; PlannedStmt *ret;
double start_time = (double) clock(); double start_time = (double) clock();
ret = pgss_planner_hook(parse, query_string, cursorOptions, boundParams); ret = pgss_planner_hook(parse, query_string, cursorOptions, boundParams);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_PLANNER_HOOK, elapsed); update_hook_stats(STATS_PGSS_PLANNER_HOOK, elapsed);
return ret; return ret;
} }
@ -819,11 +846,11 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par
PG_TRY(); PG_TRY();
{ {
/* /*
* If there is a previous installed hook, then assume it's going to call * If there is a previous installed hook, then assume it's going
* standard_planner() function, otherwise we call the function here. * to call standard_planner() function, otherwise we call the
* This is to avoid calling standard_planner() function twice, since it * function here. This is to avoid calling standard_planner()
* modifies the first argument (Query *), the second call would trigger an * function twice, since it modifies the first argument (Query *),
* assertion failure. * the second call would trigger an assertion failure.
*/ */
if (planner_hook_next) if (planner_hook_next)
result = planner_hook_next(parse, query_string, cursorOptions, boundParams); result = planner_hook_next(parse, query_string, cursorOptions, boundParams);
@ -864,11 +891,11 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par
else else
{ {
/* /*
* If there is a previous installed hook, then assume it's going to call * If there is a previous installed hook, then assume it's going to
* standard_planner() function, otherwise we call the function here. * call standard_planner() function, otherwise we call the function
* This is to avoid calling standard_planner() function twice, since it * here. This is to avoid calling standard_planner() function twice,
* modifies the first argument (Query *), the second call would trigger an * since it modifies the first argument (Query *), the second call
* assertion failure. * would trigger an assertion failure.
*/ */
if (planner_hook_next) if (planner_hook_next)
result = planner_hook_next(parse, query_string, cursorOptions, boundParams); result = planner_hook_next(parse, query_string, cursorOptions, boundParams);
@ -894,12 +921,15 @@ pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString,
QueryCompletion *qc) QueryCompletion *qc)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc); pgss_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed);
} }
#endif #endif
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, static void
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
bool readOnlyTree, bool readOnlyTree,
ProcessUtilityContext context, ProcessUtilityContext context,
ParamListInfo params, QueryEnvironment *queryEnv, ParamListInfo params, QueryEnvironment *queryEnv,
@ -916,12 +946,15 @@ pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString,
QueryCompletion *qc) QueryCompletion *qc)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc); pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed);
} }
#endif #endif
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, static void
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ProcessUtilityContext context,
ParamListInfo params, QueryEnvironment *queryEnv, ParamListInfo params, QueryEnvironment *queryEnv,
DestReceiver *dest, DestReceiver *dest,
@ -937,12 +970,15 @@ pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString,
char *completionTag) char *completionTag)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed);
} }
#endif #endif
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, static void
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ParamListInfo params, ProcessUtilityContext context, ParamListInfo params,
QueryEnvironment *queryEnv, QueryEnvironment *queryEnv,
DestReceiver *dest, DestReceiver *dest,
@ -1262,6 +1298,7 @@ pgss_update_entry(pgssEntry *entry,
/* volatile block */ /* volatile block */
{ {
volatile pgssEntry *e = (volatile pgssEntry *) entry; volatile pgssEntry *e = (volatile pgssEntry *) entry;
SpinLockAcquire(&e->mutex); SpinLockAcquire(&e->mutex);
/* Start collecting data for next bucket and reset all counters */ /* Start collecting data for next bucket and reset all counters */
if (reset) if (reset)
@ -1290,8 +1327,10 @@ pgss_update_entry(pgssEntry *entry,
e->counters.plantime.sum_var_time += (total_time - old_mean) * (total_time - e->counters.plantime.mean_time); e->counters.plantime.sum_var_time += (total_time - old_mean) * (total_time - e->counters.plantime.mean_time);
/* calculate min and max time */ /* calculate min and max time */
if (e->counters.plantime.min_time > total_time) e->counters.plantime.min_time = total_time; if (e->counters.plantime.min_time > total_time)
if (e->counters.plantime.max_time < total_time) e->counters.plantime.max_time = total_time; e->counters.plantime.min_time = total_time;
if (e->counters.plantime.max_time < total_time)
e->counters.plantime.max_time = total_time;
} }
else if (kind == PGSS_FINISHED) else if (kind == PGSS_FINISHED)
{ {
@ -1313,8 +1352,10 @@ pgss_update_entry(pgssEntry *entry,
e->counters.time.sum_var_time += (total_time - old_mean) * (total_time - e->counters.time.mean_time); e->counters.time.sum_var_time += (total_time - old_mean) * (total_time - e->counters.time.mean_time);
/* calculate min and max time */ /* calculate min and max time */
if (e->counters.time.min_time > total_time) e->counters.time.min_time = total_time; if (e->counters.time.min_time > total_time)
if (e->counters.time.max_time < total_time) e->counters.time.max_time = total_time; e->counters.time.min_time = total_time;
if (e->counters.time.max_time < total_time)
e->counters.time.max_time = total_time;
index = get_histogram_bucket(total_time); index = get_histogram_bucket(total_time);
e->counters.resp_calls[index]++; e->counters.resp_calls[index]++;
@ -1456,6 +1497,7 @@ pgss_store(uint64 queryid,
return; return;
#if PG_VERSION_NUM >= 140000 #if PG_VERSION_NUM >= 140000
/* /*
* Nothing to do if compute_query_id isn't enabled and no other module * Nothing to do if compute_query_id isn't enabled and no other module
* computed a query identifier. * computed a query identifier.
@ -1467,12 +1509,14 @@ pgss_store(uint64 queryid,
query = CleanQuerytext(query, &query_location, &query_len); query = CleanQuerytext(query, &query_location, &query_len);
#if PG_VERSION_NUM < 140000 #if PG_VERSION_NUM < 140000
/* /*
* For utility statements, we just hash the query string to get an ID. * For utility statements, we just hash the query string to get an ID.
*/ */
if (queryid == UINT64CONST(0)) if (queryid == UINT64CONST(0))
{ {
queryid = pgss_hash_string(query, query_len); queryid = pgss_hash_string(query, query_len);
/* /*
* If we are unlucky enough to get a hash of zero(invalid), use * If we are unlucky enough to get a hash of zero(invalid), use
* queryID as 2 instead, queryID 1 is already in use for normal * queryID as 2 instead, queryID 1 is already in use for normal
@ -1487,6 +1531,7 @@ pgss_store(uint64 queryid,
if (kind == PGSS_ERROR) if (kind == PGSS_ERROR)
{ {
int sec_ctx; int sec_ctx;
GetUserIdAndSecContext((Oid *) &userid, &sec_ctx); GetUserIdAndSecContext((Oid *) &userid, &sec_ctx);
} }
else else
@ -1599,10 +1644,11 @@ pgss_store(uint64 queryid,
elog(DEBUG1, "pgss_store: insufficient shared space for query."); elog(DEBUG1, "pgss_store: insufficient shared space for query.");
return; return;
} }
/* /*
* Save current query buffer length, if we fail to add a new * Save current query buffer length, if we fail to add a new new
* new entry to the hash table then we must restore the * entry to the hash table then we must restore the original
* original length. * length.
*/ */
memcpy(&prev_qbuf_len, pgss_qbuf, sizeof(prev_qbuf_len)); memcpy(&prev_qbuf_len, pgss_qbuf, sizeof(prev_qbuf_len));
} }
@ -1647,6 +1693,7 @@ pgss_store(uint64 queryid,
if (norm_query) if (norm_query)
pfree(norm_query); pfree(norm_query);
} }
/* /*
* Reset all statement statistics. * Reset all statement statistics.
*/ */
@ -1654,6 +1701,7 @@ Datum
pg_stat_monitor_reset(PG_FUNCTION_ARGS) pg_stat_monitor_reset(PG_FUNCTION_ARGS)
{ {
pgssSharedState *pgss = pgsm_get_ss(); pgssSharedState *pgss = pgsm_get_ss();
/* Safety check... */ /* Safety check... */
if (!IsSystemInitialized()) if (!IsSystemInitialized())
ereport(ERROR, ereport(ERROR,
@ -1668,6 +1716,7 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS)
#ifdef BENCHMARK #ifdef BENCHMARK
{ {
int i; int i;
for (i = STATS_START; i < STATS_END; ++i) for (i = STATS_START; i < STATS_END; ++i)
{ {
pg_hook_stats[i].min_time = 0; pg_hook_stats[i].min_time = 0;
@ -1692,7 +1741,8 @@ static bool
IsBucketValid(uint64 bucketid) IsBucketValid(uint64 bucketid)
{ {
struct tm tm; struct tm tm;
time_t bucket_t,current_t; time_t bucket_t,
current_t;
double diff_t; double diff_t;
pgssSharedState *pgss = pgsm_get_ss(); pgssSharedState *pgss = pgsm_get_ss();
@ -1789,6 +1839,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
if (read_query(pgss_qbuf, queryid, query_txt, entry->query_pos) == 0) if (read_query(pgss_qbuf, queryid, query_txt, entry->query_pos) == 0)
{ {
int rc; int rc;
rc = read_query_buffer(bucketid, queryid, query_txt, entry->query_pos); rc = read_query_buffer(bucketid, queryid, query_txt, entry->query_pos);
if (rc != 1) if (rc != 1)
snprintf(query_txt, 32, "%s", "<insufficient disk/shared space>"); snprintf(query_txt, 32, "%s", "<insufficient disk/shared space>");
@ -1797,12 +1848,16 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
/* copy counters to a local variable to keep locking time short */ /* copy counters to a local variable to keep locking time short */
{ {
volatile pgssEntry *e = (volatile pgssEntry *) entry; volatile pgssEntry *e = (volatile pgssEntry *) entry;
SpinLockAcquire(&e->mutex); SpinLockAcquire(&e->mutex);
tmp = e->counters; tmp = e->counters;
SpinLockRelease(&e->mutex); SpinLockRelease(&e->mutex);
} }
/* In case that query plan is enabled, there is no need to show 0 planid query */ /*
* In case that query plan is enabled, there is no need to show 0
* planid query
*/
if (tmp.info.cmd_type == CMD_SELECT && PGSM_QUERY_PLAN && planid == 0) if (tmp.info.cmd_type == CMD_SELECT && PGSM_QUERY_PLAN && planid == 0)
continue; continue;
@ -1821,6 +1876,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
if (read_query(pgss_qbuf, tmp.info.parentid, parent_query_txt, 0) == 0) if (read_query(pgss_qbuf, tmp.info.parentid, parent_query_txt, 0) == 0)
{ {
int rc = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt, 0); int rc = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt, 0);
if (rc != 1) if (rc != 1)
snprintf(parent_query_txt, 32, "%s", "<insufficient disk/shared space>"); snprintf(parent_query_txt, 32, "%s", "<insufficient disk/shared space>");
} }
@ -1835,9 +1891,8 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
values[i++] = ObjectIdGetDatum(dbid); values[i++] = ObjectIdGetDatum(dbid);
/* /*
* ip address at column number 3, * ip address at column number 3, Superusers or members of
* Superusers or members of pg_read_all_stats members * pg_read_all_stats members are allowed
* are allowed
*/ */
if (is_allowed_role || userid == GetUserId()) if (is_allowed_role || userid == GetUserId())
values[i++] = Int64GetDatumFast(ip); values[i++] = Int64GetDatumFast(ip);
@ -1921,7 +1976,10 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
char *tmp_str = palloc0(1024); char *tmp_str = palloc0(1024);
bool first = true; bool first = true;
/* Need to calculate the actual size, and avoid unnessary memory usage */ /*
* Need to calculate the actual size, and avoid unnessary memory
* usage
*/
for (j = 0; j < tmp.info.num_relations; j++) for (j = 0; j < tmp.info.num_relations; j++)
{ {
if (first) if (first)
@ -2101,17 +2159,17 @@ get_next_wbucket(pgssSharedState *pgss)
/* /*
* If current bucket expired we loop attempting to update prev_bucket_sec. * If current bucket expired we loop attempting to update prev_bucket_sec.
* *
* pg_atomic_compare_exchange_u64 may fail in two possible ways: * pg_atomic_compare_exchange_u64 may fail in two possible ways: 1.
* 1. Another thread/process updated the variable before us. * Another thread/process updated the variable before us. 2. A spurious
* 2. A spurious failure / hardware event. * failure / hardware event.
* *
* In both failure cases we read prev_bucket_sec from memory again, if it was * In both failure cases we read prev_bucket_sec from memory again, if it
* a spurious failure then the value of prev_bucket_sec must be the same as * was a spurious failure then the value of prev_bucket_sec must be the
* before, which will cause the while loop to execute again. * same as before, which will cause the while loop to execute again.
* *
* If another thread updated prev_bucket_sec, then its current value will * If another thread updated prev_bucket_sec, then its current value will
* definitely make the while condition to fail, we can stop the loop as another * definitely make the while condition to fail, we can stop the loop as
* thread has already updated prev_bucket_sec. * another thread has already updated prev_bucket_sec.
*/ */
if ((current_sec - current_bucket_sec) < (uint64)PGSM_BUCKET_TIME) if ((current_sec - current_bucket_sec) < (uint64)PGSM_BUCKET_TIME)
{ {
@ -2143,9 +2201,9 @@ get_next_wbucket(pgssSharedState *pgss)
if (pgss->n_bucket_cycles >= PGSM_MAX_BUCKETS) if (pgss->n_bucket_cycles >= PGSM_MAX_BUCKETS)
{ {
/* /*
* A full rotation of PGSM_MAX_BUCKETS buckets happened since * A full rotation of PGSM_MAX_BUCKETS buckets happened since we
* we detected a query buffer overflow. * detected a query buffer overflow. Reset overflow state and
* Reset overflow state and remove the dump file. * remove the dump file.
*/ */
pgss->overflow = false; pgss->overflow = false;
pgss->n_bucket_cycles = 0; pgss->n_bucket_cycles = 0;
@ -2801,6 +2859,7 @@ JumbleExpr(JumbleState *jstate, Node *node)
break; break;
} }
} }
/* /*
* Record location of constant within query string of query tree * Record location of constant within query string of query tree
* that is currently being walked. * that is currently being walked.
@ -3242,8 +3301,9 @@ SaveQueryText(uint64 bucketid,
} }
/* /*
* If the query buffer is empty, there is nothing to dump, this also * If the query buffer is empty, there is nothing to dump,
* means that the current query length exceeds MAX_QUERY_BUF. * this also means that the current query length exceeds
* MAX_QUERY_BUF.
*/ */
if (buf_len <= sizeof(uint64)) if (buf_len <= sizeof(uint64))
return false; return false;
@ -3258,15 +3318,16 @@ SaveQueryText(uint64 bucketid,
} }
/* /*
* We must check for overflow again, as the query length may * We must check for overflow again, as the query length
* exceed the total size allocated to the buffer (MAX_QUERY_BUF). * may exceed the total size allocated to the buffer
* (MAX_QUERY_BUF).
*/ */
if (QUERY_BUFFER_OVERFLOW(buf_len, query_len)) if (QUERY_BUFFER_OVERFLOW(buf_len, query_len))
{ {
/* /*
* If we successfully dumped the query buffer to disk, then * If we successfully dumped the query buffer to disk,
* reset the buffer, otherwise we could end up dumping the * then reset the buffer, otherwise we could end up
* same buffer again. * dumping the same buffer again.
*/ */
if (dump_ok) if (dump_ok)
*(uint64 *) buf = 0; *(uint64 *) buf = 0;
@ -3368,6 +3429,7 @@ pg_stat_monitor_settings(PG_FUNCTION_ARGS)
case PGC_INT: case PGC_INT:
{ {
char value[32]; char value[32];
sprintf(value, "%d", conf->guc_variable); sprintf(value, "%d", conf->guc_variable);
values[j++] = CStringGetTextDatum(value); values[j++] = CStringGetTextDatum(value);
@ -3402,6 +3464,7 @@ pg_stat_monitor_settings(PG_FUNCTION_ARGS)
if (conf->type == PGC_ENUM) if (conf->type == PGC_ENUM)
{ {
size_t i; size_t i;
strcat(options, conf->guc_options[0]); strcat(options, conf->guc_options[0]);
for (i = 1; i < conf->n_options; ++i) for (i = 1; i < conf->n_options; ++i)
{ {
@ -3470,6 +3533,7 @@ pg_stat_monitor_hook_stats(PG_FUNCTION_ARGS)
Datum values[5]; Datum values[5];
bool nulls[5]; bool nulls[5];
int j = 0; int j = 0;
memset(values, 0, sizeof(values)); memset(values, 0, sizeof(values));
memset(nulls, 0, sizeof(nulls)); memset(nulls, 0, sizeof(nulls));
@ -3498,8 +3562,10 @@ static void
pgsm_emit_log_hook_benchmark(ErrorData *edata) pgsm_emit_log_hook_benchmark(ErrorData *edata)
{ {
double start_time = (double) clock(); double start_time = (double) clock();
pgsm_emit_log_hook(edata); pgsm_emit_log_hook(edata);
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC; double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
update_hook_stats(STATS_PGSM_EMIT_LOG_HOOK, elapsed); update_hook_stats(STATS_PGSM_EMIT_LOG_HOOK, elapsed);
} }
#endif #endif
@ -3559,8 +3625,10 @@ dump_queries_buffer(int bucket_id, unsigned char *buf, int buf_len)
} }
/* Loop until write buf_len bytes to the file. */ /* Loop until write buf_len bytes to the file. */
do { do
{
ssize_t nwrite = write(fd, buf + off, buf_len - off); ssize_t nwrite = write(fd, buf + off, buf_len - off);
if (nwrite == -1) if (nwrite == -1)
{ {
if (errno == EINTR && tries++ < 3) if (errno == EINTR && tries++ < 3)
@ -3613,11 +3681,14 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos)
{ {
off = 0; off = 0;
/* read a chunck of MAX_QUERY_BUF size. */ /* read a chunck of MAX_QUERY_BUF size. */
do { do
{
nread = read(fd, buf + off, MAX_QUERY_BUF - off); nread = read(fd, buf + off, MAX_QUERY_BUF - off);
if (nread == -1) if (nread == -1)
{ {
if (errno == EINTR && tries++ < 3) /* read() was interrupted, attempt to read again (max attempts=3) */ if (errno == EINTR && tries++ < 3) /* read() was interrupted,
* attempt to read again
* (max attempts=3) */
continue; continue;
goto exit; goto exit;
@ -3643,9 +3714,11 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos)
} }
} }
else else
/* /*
* Either done=true or file has a size not multiple of MAX_QUERY_BUF. * Either done=true or file has a size not multiple of
* It is safe to assume that the file was truncated or corrupted. * MAX_QUERY_BUF. It is safe to assume that the file was truncated
* or corrupted.
*/ */
break; break;
} }
@ -3676,6 +3749,7 @@ time_diff(struct timeval end, struct timeval start)
{ {
double mstart; double mstart;
double mend; double mend;
mend = ((double) end.tv_sec * 1000.0 + (double) end.tv_usec / 1000.0); mend = ((double) end.tv_sec * 1000.0 + (double) end.tv_usec / 1000.0);
mstart = ((double) start.tv_sec * 1000.0 + (double) start.tv_usec / 1000.0); mstart = ((double) start.tv_sec * 1000.0 + (double) start.tv_usec / 1000.0);
return mend - mstart; return mend - mstart;
@ -3719,6 +3793,7 @@ get_histogram_bucket(double q_time)
{ {
int64 b_start = (index == 1) ? 0 : exp(bucket_size * (index - 1)); int64 b_start = (index == 1) ? 0 : exp(bucket_size * (index - 1));
int64 b_end = exp(bucket_size * index); int64 b_end = exp(bucket_size * index);
if ((index == 1 && q_time < b_start) if ((index == 1 && q_time < b_start)
|| (q_time >= b_start && q_time <= b_end) || (q_time >= b_start && q_time <= b_end)
|| (index == b_count && q_time > b_end)) || (index == b_count && q_time > b_end))
@ -3750,6 +3825,7 @@ get_histogram_timings(PG_FUNCTION_ARGS)
{ {
int64 b_start = (index == 1) ? 0 : exp(bucket_size * (index - 1)); int64 b_start = (index == 1) ? 0 : exp(bucket_size * (index - 1));
int64 b_end = exp(bucket_size * index); int64 b_end = exp(bucket_size * index);
if (first) if (first)
{ {
snprintf(text_str, MAX_STRING_LEN, "(%ld - %ld)}", b_start, b_end); snprintf(text_str, MAX_STRING_LEN, "(%ld - %ld)}", b_start, b_end);
@ -3771,7 +3847,8 @@ extract_query_comments(const char *query, char *comments, size_t max_len)
int rc; int rc;
size_t nmatch = 1; size_t nmatch = 1;
regmatch_t pmatch; regmatch_t pmatch;
regoff_t comment_len, total_len = 0; regoff_t comment_len,
total_len = 0;
const char *s = query; const char *s = query;
while (total_len < max_len) while (total_len < max_len)
@ -3783,7 +3860,8 @@ extract_query_comments(const char *query, char *comments, size_t max_len)
comment_len = pmatch.rm_eo - pmatch.rm_so; comment_len = pmatch.rm_eo - pmatch.rm_so;
if (total_len + comment_len > max_len) if (total_len + comment_len > max_len)
break; /* TODO: log error in error view, insufficient space for comment. */ break; /* TODO: log error in error view, insufficient
* space for comment. */
total_len += comment_len; total_len += comment_len;
@ -3791,7 +3869,8 @@ extract_query_comments(const char *query, char *comments, size_t max_len)
if (s != query) if (s != query)
{ {
if (total_len + 2 > max_len) if (total_len + 2 > max_len)
break; /* TODO: log error in error view, insufficient space for ", " + comment. */ break; /* TODO: log error in error view, insufficient
* space for ", " + comment. */
memcpy(comments, ", ", 2); memcpy(comments, ", ", 2);
comments += 2; comments += 2;
@ -3825,17 +3904,20 @@ get_query_id(JumbleState *jstate, Query *query)
} }
#endif #endif
static uint64 djb2_hash(unsigned char *str, size_t len) static uint64
djb2_hash(unsigned char *str, size_t len)
{ {
uint64 hash = 5381LLU; uint64 hash = 5381LLU;
while (len--) while (len--)
hash = ((hash << 5) + hash) ^ *str++; // hash(i - 1) * 33 ^ str[i] hash = ((hash << 5) + hash) ^ *str++;
/* hash(i - 1) * 33 ^ str[i] */
return hash; return hash;
} }
static uint64 djb2_hash_str(unsigned char *str, int *out_len) static uint64
djb2_hash_str(unsigned char *str, int *out_len)
{ {
uint64 hash = 5381LLU; uint64 hash = 5381LLU;
unsigned char *start = str; unsigned char *start = str;
@ -3843,7 +3925,8 @@ static uint64 djb2_hash_str(unsigned char *str, int *out_len)
while ((c = *str) != '\0') while ((c = *str) != '\0')
{ {
hash = ((hash << 5) + hash) ^ c; // hash(i - 1) * 33 ^ str[i] hash = ((hash << 5) + hash) ^ c;
/* hash(i - 1) * 33 ^ str[i] */
++str; ++str;
} }
@ -3853,9 +3936,11 @@ static uint64 djb2_hash_str(unsigned char *str, int *out_len)
} }
#ifdef BENCHMARK #ifdef BENCHMARK
void init_hook_stats(void) void
init_hook_stats(void)
{ {
bool found = false; bool found = false;
pg_hook_stats = ShmemInitStruct("pg_stat_monitor_hook_stats", HOOK_STATS_SIZE, &found); pg_hook_stats = ShmemInitStruct("pg_stat_monitor_hook_stats", HOOK_STATS_SIZE, &found);
if (!found) if (!found)
{ {
@ -3878,11 +3963,13 @@ void init_hook_stats(void)
} }
} }
void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed) void
update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed)
{ {
Assert(hook_id > STATS_START && hook_id < STATS_END); Assert(hook_id > STATS_START && hook_id < STATS_END);
struct pg_hook_stats_t *p = &pg_hook_stats[hook_id]; struct pg_hook_stats_t *p = &pg_hook_stats[hook_id];
if (time_elapsed < p->min_time) if (time_elapsed < p->min_time)
p->min_time = time_elapsed; p->min_time = time_elapsed;

View File

@ -104,7 +104,8 @@
#define MAX_ENUM_OPTIONS 6 #define MAX_ENUM_OPTIONS 6
typedef struct GucVariables typedef struct GucVariables
{ {
enum config_type type; /* PGC_BOOL, PGC_INT, PGC_REAL, PGC_STRING, PGC_ENUM */ enum config_type type; /* PGC_BOOL, PGC_INT, PGC_REAL, PGC_STRING,
* PGC_ENUM */
int guc_variable; int guc_variable;
char guc_name[TEXT_LEN]; char guc_name[TEXT_LEN];
char guc_desc[TEXT_LEN]; char guc_desc[TEXT_LEN];
@ -214,12 +215,15 @@ typedef struct pgssHashKey
typedef struct QueryInfo typedef struct QueryInfo
{ {
uint64 parentid; /* parent queryid of current query */ uint64 parentid; /* parent queryid of current query */
int64 type; /* type of query, options are query, info, warning, error, fatal */ int64 type; /* type of query, options are query, info,
* warning, error, fatal */
char application_name[APPLICATIONNAME_LEN]; char application_name[APPLICATIONNAME_LEN];
char comments[COMMENTS_LEN]; char comments[COMMENTS_LEN];
char relations[REL_LST][REL_LEN]; /* List of relation involved in the query */ char relations[REL_LST][REL_LEN]; /* List of relation involved
* in the query */
int num_relations; /* Number of relation in the query */ int num_relations; /* Number of relation in the query */
CmdType cmd_type; /* query command type SELECT/UPDATE/DELETE/INSERT */ CmdType cmd_type; /* query command type
* SELECT/UPDATE/DELETE/INSERT */
} QueryInfo; } QueryInfo;
typedef struct ErrorInfo typedef struct ErrorInfo
@ -281,7 +285,8 @@ typedef struct Counters
SysInfo sysinfo; SysInfo sysinfo;
ErrorInfo error; ErrorInfo error;
Wal_Usage walusage; Wal_Usage walusage;
int resp_calls[MAX_RESPONSE_BUCKET]; /* execution time's in msec */ int resp_calls[MAX_RESPONSE_BUCKET]; /* execution time's in
* msec */
uint64 state; /* query state */ uint64 state; /* query state */
} Counters; } Counters;
@ -312,16 +317,19 @@ typedef struct pgssSharedState
pg_atomic_uint64 current_wbucket; pg_atomic_uint64 current_wbucket;
pg_atomic_uint64 prev_bucket_sec; pg_atomic_uint64 prev_bucket_sec;
uint64 bucket_entry[MAX_BUCKETS]; uint64 bucket_entry[MAX_BUCKETS];
char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ char bucket_start_time[MAX_BUCKETS][60]; /* start time of the
LWLock *errors_lock; /* protects errors hashtable search/modification */ * bucket */
LWLock *errors_lock; /* protects errors hashtable
* search/modification */
/* /*
* These variables are used when pgsm_overflow_target is ON. * These variables are used when pgsm_overflow_target is ON.
* *
* overflow is set to true when the query buffer overflows. * overflow is set to true when the query buffer overflows.
* *
* n_bucket_cycles counts the number of times we changed bucket * n_bucket_cycles counts the number of times we changed bucket since the
* since the query buffer overflowed. When it reaches pgsm_max_buckets * query buffer overflowed. When it reaches pgsm_max_buckets we remove the
* we remove the dump file, also reset the counter. * dump file, also reset the counter.
* *
* This allows us to avoid having a large file on disk that would also * This allows us to avoid having a large file on disk that would also
* slowdown queries to the pg_stat_monitor view. * slowdown queries to the pg_stat_monitor view.
@ -350,6 +358,7 @@ typedef struct LocationLen
int location; /* start offset in query text */ int location; /* start offset in query text */
int length; /* length in bytes, or -1 to ignore */ int length; /* length in bytes, or -1 to ignore */
} LocationLen; } LocationLen;
/* /*
* Working state for computing a query jumble and producing a normalized * Working state for computing a query jumble and producing a normalized
* query string * query string
@ -414,8 +423,10 @@ void set_qbuf(unsigned char *);
/* hash_query.c */ /* hash_query.c */
void pgss_startup(void); void pgss_startup(void);
/*---- GUC variables ----*/ /*---- GUC variables ----*/
typedef enum { typedef enum
{
PSGM_TRACK_NONE = 0, /* track no statements */ PSGM_TRACK_NONE = 0, /* track no statements */
PGSM_TRACK_TOP, /* only top level statements */ PGSM_TRACK_TOP, /* only top level statements */
PGSM_TRACK_ALL /* all statements, including nested ones */ PGSM_TRACK_ALL /* all statements, including nested ones */
@ -452,7 +463,8 @@ static const struct config_enum_entry track_options[] =
* STATS_START and STATS_END are used only to delimit the range. * STATS_START and STATS_END are used only to delimit the range.
* STATS_END is also the length of the valid items in the enum. * STATS_END is also the length of the valid items in the enum.
*/ */
enum pg_hook_stats_id { enum pg_hook_stats_id
{
STATS_START = -1, STATS_START = -1,
STATS_PGSS_POST_PARSE_ANALYZE, STATS_PGSS_POST_PARSE_ANALYZE,
STATS_PGSS_EXECUTORSTART, STATS_PGSS_EXECUTORSTART,
@ -469,7 +481,8 @@ enum pg_hook_stats_id {
}; };
/* Hold time to execute statistics for a hook. */ /* Hold time to execute statistics for a hook. */
struct pg_hook_stats_t { struct pg_hook_stats_t
{
char hook_name[64]; char hook_name[64];
double min_time; double min_time;
double max_time; double max_time;

View File

@ -44,7 +44,8 @@ PG_FUNCTION_INFO_V1(pg_stat_monitor_reset_errors);
static HTAB *pgsm_errors_ht = NULL; static HTAB *pgsm_errors_ht = NULL;
void psgm_errors_init(void) void
psgm_errors_init(void)
{ {
HASHCTL info; HASHCTL info;
#if PG_VERSION_NUM >= 140000 #if PG_VERSION_NUM >= 140000
@ -64,12 +65,14 @@ void psgm_errors_init(void)
flags); flags);
} }
size_t pgsm_errors_size(void) size_t
pgsm_errors_size(void)
{ {
return hash_estimate_size(PSGM_ERRORS_MAX, sizeof(ErrorEntry)); return hash_estimate_size(PSGM_ERRORS_MAX, sizeof(ErrorEntry));
} }
void pgsm_log(PgsmLogSeverity severity, const char *format, ...) void
pgsm_log(PgsmLogSeverity severity, const char *format,...)
{ {
char key[ERROR_MSG_MAX_LEN]; char key[ERROR_MSG_MAX_LEN];
ErrorEntry *entry; ErrorEntry *entry;
@ -94,6 +97,7 @@ void pgsm_log(PgsmLogSeverity severity, const char *format, ...)
if (!entry) if (!entry)
{ {
LWLockRelease(pgss->errors_lock); LWLockRelease(pgss->errors_lock);
/* /*
* We're out of memory, can't track this error message. * We're out of memory, can't track this error message.
*/ */
@ -204,6 +208,7 @@ pg_stat_monitor_errors(PG_FUNCTION_ARGS)
Datum values[4]; Datum values[4];
bool nulls[4]; bool nulls[4];
int i = 0; int i = 0;
memset(values, 0, sizeof(values)); memset(values, 0, sizeof(values));
memset(nulls, 0, sizeof(nulls)); memset(nulls, 0, sizeof(nulls));