PG-456: Running pgindent to make source PostgreSQL compatible. (#269)
PG-456: Running pgindent to make source indentation/spacing PostgreSQL-compatible. The PostgreSQL project runs pgindent from time to time to keep the source code compliant with the PostgreSQL style guide; it has been a very long time since we last did that here. This commit fixes a large number of indentation and spacing issues. Co-authored-by: Hamid Akhtar <hamid.akhtar@gmail.com>
parent 926eade1eb
commit a9187117f9
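For readers unfamiliar with pgindent, the short sketch below is a hypothetical snippet (not taken from this commit) showing the formatting conventions it enforces and that dominate the hunks that follow: one variable per declaration line, braces on their own lines, and block comments re-wrapped to the standard width.

/* Hypothetical example of pgindent output style; not project code. */
#include <stdio.h>

static int
sum_pair(int a, int b)
{
	int			total,
				ncalls = 1;		/* multi-variable declarations are split */

	/*
	 * Block comments are re-wrapped so each line stays within the preferred
	 * width, with the opening and closing markers on their own lines.
	 */
	total = a + b + (ncalls - 1);
	return total;
}

int
main(void)
{
	printf("%d\n", sum_pair(2, 3));
	return 0;
}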
60  guc.c
|
@ -34,9 +34,11 @@ static bool check_histogram_max(int *newval, void **extra, GucSource source);
|
|||
void
|
||||
init_guc(void)
|
||||
{
|
||||
int i = 0, j;
|
||||
int i = 0,
|
||||
j;
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_max",
|
||||
.guc_desc = "Sets the maximum size of shared memory in (MB) used for statement's metadata tracked by pg_stat_monitor.",
|
||||
.guc_default = 100,
|
||||
|
@ -48,7 +50,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_query_max_len",
|
||||
.guc_desc = "Sets the maximum length of query.",
|
||||
.guc_default = 2048,
|
||||
|
@ -60,7 +63,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_track_utility",
|
||||
.guc_desc = "Selects whether utility commands are tracked.",
|
||||
.guc_default = 1,
|
||||
|
@ -72,7 +76,8 @@ init_guc(void)
|
|||
};
|
||||
DefineBoolGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_normalized_query",
|
||||
.guc_desc = "Selects whether save query in normalized format.",
|
||||
.guc_default = 0,
|
||||
|
@ -84,7 +89,8 @@ init_guc(void)
|
|||
};
|
||||
DefineBoolGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_max_buckets",
|
||||
.guc_desc = "Sets the maximum number of buckets.",
|
||||
.guc_default = 10,
|
||||
|
@ -96,7 +102,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_bucket_time",
|
||||
.guc_desc = "Sets the time in seconds per bucket.",
|
||||
.guc_default = 60,
|
||||
|
@ -108,7 +115,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_histogram_min",
|
||||
.guc_desc = "Sets the time in millisecond.",
|
||||
.guc_default = 0,
|
||||
|
@ -120,7 +128,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUCWithCheck(&conf[i++], check_histogram_min);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_histogram_max",
|
||||
.guc_desc = "Sets the time in millisecond.",
|
||||
.guc_default = 100000,
|
||||
|
@ -132,7 +141,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUCWithCheck(&conf[i++], check_histogram_max);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_histogram_buckets",
|
||||
.guc_desc = "Sets the maximum number of histogram buckets",
|
||||
.guc_default = 10,
|
||||
|
@ -144,7 +154,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_query_shared_buffer",
|
||||
.guc_desc = "Sets the maximum size of shared memory in (MB) used for query tracked by pg_stat_monitor.",
|
||||
.guc_default = 20,
|
||||
|
@ -156,7 +167,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_overflow_target",
|
||||
.guc_desc = "Sets the overflow target for pg_stat_monitor",
|
||||
.guc_default = 1,
|
||||
|
@ -168,7 +180,8 @@ init_guc(void)
|
|||
};
|
||||
DefineIntGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_enable_query_plan",
|
||||
.guc_desc = "Enable/Disable query plan monitoring",
|
||||
.guc_default = 0,
|
||||
|
@ -180,7 +193,8 @@ init_guc(void)
|
|||
};
|
||||
DefineBoolGUC(&conf[i++]);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_track",
|
||||
.guc_desc = "Selects which statements are tracked by pg_stat_monitor.",
|
||||
.n_options = 3,
|
||||
|
@ -191,12 +205,14 @@ init_guc(void)
|
|||
.guc_unit = 0,
|
||||
.guc_value = &PGSM_TRACK
|
||||
};
|
||||
for (j = 0; j < conf[i].n_options; ++j) {
|
||||
for (j = 0; j < conf[i].n_options; ++j)
|
||||
{
|
||||
strlcpy(conf[i].guc_options[j], track_options[j].name, sizeof(conf[i].guc_options[j]));
|
||||
}
|
||||
DefineEnumGUC(&conf[i++], track_options);
|
||||
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_extract_comments",
|
||||
.guc_desc = "Enable/Disable extracting comments from queries.",
|
||||
.guc_default = 0,
|
||||
|
@ -209,7 +225,8 @@ init_guc(void)
|
|||
DefineBoolGUC(&conf[i++]);
|
||||
|
||||
#if PG_VERSION_NUM >= 130000
|
||||
conf[i] = (GucVariable) {
|
||||
conf[i] = (GucVariable)
|
||||
{
|
||||
.guc_name = "pg_stat_monitor.pgsm_track_planning",
|
||||
.guc_desc = "Selects whether planning statistics are tracked.",
|
||||
.guc_default = 0,
|
||||
|
@ -223,7 +240,8 @@ init_guc(void)
|
|||
#endif
|
||||
}
|
||||
|
||||
static void DefineIntGUCWithCheck(GucVariable *conf, GucIntCheckHook check)
|
||||
static void
|
||||
DefineIntGUCWithCheck(GucVariable * conf, GucIntCheckHook check)
|
||||
{
|
||||
conf->type = PGC_INT;
|
||||
DefineCustomIntVariable(conf->guc_name,
|
||||
|
@ -285,7 +303,8 @@ get_conf(int i)
|
|||
return &conf[i];
|
||||
}
|
||||
|
||||
static bool check_histogram_min(int *newval, void **extra, GucSource source)
|
||||
static bool
|
||||
check_histogram_min(int *newval, void **extra, GucSource source)
|
||||
{
|
||||
/*
|
||||
* During module initialization PGSM_HISTOGRAM_MIN is initialized before
|
||||
|
@ -294,7 +313,8 @@ static bool check_histogram_min(int *newval, void **extra, GucSource source)
|
|||
return (PGSM_HISTOGRAM_MAX == 0 || *newval < PGSM_HISTOGRAM_MAX);
|
||||
}
|
||||
|
||||
static bool check_histogram_max(int *newval, void **extra, GucSource source)
|
||||
static bool
|
||||
check_histogram_max(int *newval, void **extra, GucSource source)
|
||||
{
|
||||
return (*newval > PGSM_HISTOGRAM_MIN);
|
||||
}
|
||||
|
|
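The guc.c hunks above fill an array of GucVariable structs with C99 designated initializers and then register each entry through DefineIntGUC()/DefineBoolGUC(). A simplified, standalone sketch of that pattern follows; the ConfigVar struct, the register_int_guc() helper, and the min/max values are stand-ins for illustration, not pg_stat_monitor's actual definitions.

/* Simplified model of the GUC table built in guc.c; names and limits
 * below are placeholders, only the GUC names, descriptions and defaults
 * come from the hunks above. */
#include <stdio.h>

typedef struct ConfigVar
{
	const char *name;
	const char *desc;
	int			default_val;
	int			min;
	int			max;
} ConfigVar;

static ConfigVar conf[2];

/* Stand-in for DefineIntGUC(); the extension ultimately calls
 * DefineCustomIntVariable() from the PostgreSQL GUC machinery. */
static void
register_int_guc(const ConfigVar *v)
{
	printf("registering %s (default %d)\n", v->name, v->default_val);
}

static void
init_guc(void)
{
	int			i = 0;

	conf[i] = (ConfigVar)
	{
		.name = "pg_stat_monitor.pgsm_max",
		.desc = "Sets the maximum size of shared memory in (MB) used for statement's metadata tracked by pg_stat_monitor.",
		.default_val = 100,
		.min = 1,
		.max = 1000,
	};
	register_int_guc(&conf[i++]);

	conf[i] = (ConfigVar)
	{
		.name = "pg_stat_monitor.pgsm_query_max_len",
		.desc = "Sets the maximum length of query.",
		.default_val = 2048,
		.min = 1024,
		.max = 1024 * 1024,
	};
	register_int_guc(&conf[i++]);
}

int
main(void)
{
	init_guc();
	return 0;
}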
61  hash_query.c
|
@ -29,6 +29,7 @@ static HTAB*
|
|||
hash_init(const char *hash_name, int key_size, int entry_size, int hash_size)
|
||||
{
|
||||
HASHCTL info;
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.keysize = key_size;
|
||||
info.entrysize = entry_size;
|
||||
|
@ -185,15 +186,19 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
|
|||
while ((entry = hash_seq_search(&hash_seq)) != NULL)
|
||||
{
|
||||
/*
|
||||
* Remove all entries if new_bucket_id == -1.
|
||||
* Otherwise remove entry in new_bucket_id if it has finished already.
|
||||
* Remove all entries if new_bucket_id == -1. Otherwise remove entry
|
||||
* in new_bucket_id if it has finished already.
|
||||
*/
|
||||
if (new_bucket_id < 0 ||
|
||||
(entry->key.bucket_id == new_bucket_id &&
|
||||
(entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR)))
|
||||
{
|
||||
if (new_bucket_id == -1) {
|
||||
/* pg_stat_monitor_reset(), remove entry from query hash table too. */
|
||||
if (new_bucket_id == -1)
|
||||
{
|
||||
/*
|
||||
* pg_stat_monitor_reset(), remove entry from query hash table
|
||||
* too.
|
||||
*/
|
||||
hash_search(pgss_query_hash, &(entry->key.queryid), HASH_REMOVE, NULL);
|
||||
}
|
||||
|
||||
|
@ -201,11 +206,10 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
|
|||
}
|
||||
|
||||
/*
|
||||
* If we detect a pending query residing in the previous bucket id,
|
||||
* we add it to a list of pending elements to be moved to the new
|
||||
* bucket id.
|
||||
* Can't update the hash table while iterating it inside this loop,
|
||||
* as this may introduce all sort of problems.
|
||||
* If we detect a pending query residing in the previous bucket id, we
|
||||
* add it to a list of pending elements to be moved to the new bucket
|
||||
* id. Can't update the hash table while iterating it inside this
|
||||
* loop, as this may introduce all sort of problems.
|
||||
*/
|
||||
if (old_bucket_id != -1 && entry->key.bucket_id == old_bucket_id)
|
||||
{
|
||||
|
@ -214,18 +218,22 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
|
|||
entry->counters.state == PGSS_EXEC)
|
||||
{
|
||||
pgssEntry *bkp_entry = malloc(sizeof(pgssEntry));
|
||||
|
||||
if (!bkp_entry)
|
||||
{
|
||||
elog(DEBUG1, "hash_entry_dealloc: out of memory");
|
||||
|
||||
/*
|
||||
* No memory, If the entry has calls > 1 then we change the state to finished,
|
||||
* as the pending query will likely finish execution during the new bucket
|
||||
* time window. The pending query will vanish in this case, can't list it
|
||||
* No memory, If the entry has calls > 1 then we change
|
||||
* the state to finished, as the pending query will likely
|
||||
* finish execution during the new bucket time window. The
|
||||
* pending query will vanish in this case, can't list it
|
||||
* until it completes.
|
||||
*
|
||||
* If there is only one call to the query and it's pending, remove the
|
||||
* entry from the previous bucket and allow it to finish in the new bucket,
|
||||
* in order to avoid the query living in the old bucket forever.
|
||||
* If there is only one call to the query and it's
|
||||
* pending, remove the entry from the previous bucket and
|
||||
* allow it to finish in the new bucket, in order to avoid
|
||||
* the query living in the old bucket forever.
|
||||
*/
|
||||
if (entry->counters.calls.calls > 1)
|
||||
entry->counters.state = PGSS_FINISHED;
|
||||
|
@ -244,14 +252,16 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
|
|||
pending_entries = lappend(pending_entries, bkp_entry);
|
||||
|
||||
/*
|
||||
* If the entry has calls > 1 then we change the state to finished in
|
||||
* the previous bucket, as the pending query will likely finish execution
|
||||
* during the new bucket time window. Can't remove it from the previous bucket
|
||||
* as it may have many calls and we would lose the query statistics.
|
||||
* If the entry has calls > 1 then we change the state to
|
||||
* finished in the previous bucket, as the pending query will
|
||||
* likely finish execution during the new bucket time window.
|
||||
* Can't remove it from the previous bucket as it may have
|
||||
* many calls and we would lose the query statistics.
|
||||
*
|
||||
* If there is only one call to the query and it's pending, remove the entry
|
||||
* from the previous bucket and allow it to finish in the new bucket,
|
||||
* in order to avoid the query living in the old bucket forever.
|
||||
* If there is only one call to the query and it's pending,
|
||||
* remove the entry from the previous bucket and allow it to
|
||||
* finish in the new bucket, in order to avoid the query
|
||||
* living in the old bucket forever.
|
||||
*/
|
||||
if (entry->counters.calls.calls > 1)
|
||||
entry->counters.state = PGSS_FINISHED;
|
||||
|
@ -262,10 +272,11 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
|
|||
}
|
||||
|
||||
/*
|
||||
* Iterate over the list of pending queries in order
|
||||
* to add them back to the hash table with the updated bucket id.
|
||||
* Iterate over the list of pending queries in order to add them back to
|
||||
* the hash table with the updated bucket id.
|
||||
*/
|
||||
foreach (pending_entry, pending_entries) {
|
||||
foreach(pending_entry, pending_entries)
|
||||
{
|
||||
bool found = false;
|
||||
pgssEntry *new_entry;
|
||||
pgssEntry *old_entry = (pgssEntry *) lfirst(pending_entry);
|
||||
|
|
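The hash_query.c hunks above are only comment re-wrapping and brace placement, but the logic they describe is a two-pass scheme: while scanning the hash table, pending entries from the old bucket are collected into a list, and only after the scan are they re-homed under the new bucket id, since updating the table while iterating it is unsafe. Below is a simplified standalone model of that idea; plain arrays stand in for PostgreSQL's HTAB and List, and the real code additionally keeps a "finished" copy of multi-call entries in the old bucket.

/* Standalone model of the collect-then-move approach described in the
 * hash_entry_dealloc() comments; all names here are illustrative. */
#include <stdio.h>

typedef enum { STATE_PENDING, STATE_FINISHED } EntryState;

typedef struct Entry
{
	int			bucket_id;
	EntryState	state;
} Entry;

static void
move_pending_entries(Entry *entries, int nentries, int old_bucket, int new_bucket)
{
	int			pending[16];
	int			npending = 0;
	int			i;

	/* Pass 1: scan and remember pending entries; the table itself is not
	 * modified while it is being iterated. */
	for (i = 0; i < nentries; i++)
	{
		if (entries[i].bucket_id == old_bucket &&
			entries[i].state == STATE_PENDING &&
			npending < 16)
			pending[npending++] = i;
	}

	/* Pass 2: re-home the collected entries under the new bucket id. */
	for (i = 0; i < npending; i++)
		entries[pending[i]].bucket_id = new_bucket;
}

int
main(void)
{
	Entry		entries[2] = {
		{.bucket_id = 3, .state = STATE_PENDING},
		{.bucket_id = 3, .state = STATE_FINISHED},
	};

	move_pending_entries(entries, 2, 3, 4);
	printf("pending entry moved to bucket %d\n", entries[0].bucket_id);
	return 0;
}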
|
@ -73,6 +73,7 @@ static int num_relations; /* Number of relation in the query */
|
|||
static bool system_init = false;
|
||||
static struct rusage rusage_start;
|
||||
static struct rusage rusage_end;
|
||||
|
||||
/* Query buffer, store queries' text. */
|
||||
static unsigned char *pgss_qbuf = NULL;
|
||||
static char *pgss_explain(QueryDesc *queryDesc);
|
||||
|
@ -99,6 +100,7 @@ static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
|
|||
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
|
||||
static ProcessUtility_hook_type prev_ProcessUtility = NULL;
|
||||
static emit_log_hook_type prev_emit_log_hook = NULL;
|
||||
|
||||
DECLARE_HOOK(void pgsm_emit_log_hook, ErrorData *edata);
|
||||
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
|
||||
static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook = NULL;
|
||||
|
@ -145,6 +147,7 @@ DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryStri
|
|||
static uint64 pgss_hash_string(const char *str, int len);
|
||||
#else
|
||||
static void BufferUsageAccumDiff(BufferUsage *bufusage, BufferUsage *pgBufferUsage, BufferUsage *bufusage_start);
|
||||
|
||||
DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString,
|
||||
ProcessUtilityContext context, ParamListInfo params,
|
||||
QueryEnvironment *queryEnv,
|
||||
|
@ -184,12 +187,12 @@ static void JumbleQuery(JumbleState *jstate, Query *query);
|
|||
static void JumbleRangeTable(JumbleState *jstate, List *rtable, CmdType cmd_type);
|
||||
static void JumbleExpr(JumbleState *jstate, Node *node);
|
||||
static void RecordConstLocation(JumbleState *jstate, int location);
|
||||
|
||||
/*
|
||||
* Given a possibly multi-statement source string, confine our attention to the
|
||||
* relevant part of the string.
|
||||
*/
|
||||
static const char *
|
||||
CleanQuerytext(const char *query, int *location, int *len);
|
||||
static const char *CleanQuerytext(const char *query, int *location, int *len);
|
||||
#endif
|
||||
|
||||
static char *generate_normalized_query(JumbleState *jstate, const char *query,
|
||||
|
@ -205,12 +208,14 @@ static uint64 get_query_id(JumbleState *jstate, Query *query);
|
|||
|
||||
/* Daniel J. Bernstein's hash algorithm: see http://www.cse.yorku.ca/~oz/hash.html */
|
||||
static uint64 djb2_hash(unsigned char *str, size_t len);
|
||||
|
||||
/* Same as above, but stores the calculated string length into *out_len (small optimization) */
|
||||
static uint64 djb2_hash_str(unsigned char *str, int *out_len);
|
||||
|
||||
/*
|
||||
* Module load callback
|
||||
*/
|
||||
// cppcheck-suppress unusedFunction
|
||||
/* cppcheck-suppress unusedFunction */
|
||||
void
|
||||
_PG_init(void)
|
||||
{
|
||||
|
@ -218,6 +223,7 @@ _PG_init(void)
|
|||
char file_name[1024];
|
||||
|
||||
elog(DEBUG2, "pg_stat_monitor: %s()", __FUNCTION__);
|
||||
|
||||
/*
|
||||
* In order to create our shared memory area, we have to be loaded via
|
||||
* shared_preload_libraries. If not, fall out without hooking into any of
|
||||
|
@ -233,6 +239,7 @@ _PG_init(void)
|
|||
init_guc();
|
||||
|
||||
#if PG_VERSION_NUM >= 140000
|
||||
|
||||
/*
|
||||
* Inform the postmaster that we want to enable query_id calculation if
|
||||
* compute_query_id is set to auto.
|
||||
|
@ -296,7 +303,7 @@ _PG_init(void)
|
|||
/*
|
||||
* Module unload callback
|
||||
*/
|
||||
// cppcheck-suppress unusedFunction
|
||||
/* cppcheck-suppress unusedFunction */
|
||||
void
|
||||
_PG_fini(void)
|
||||
{
|
||||
|
@ -346,8 +353,10 @@ static void
|
|||
pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query, JumbleState *jstate)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_post_parse_analyze(pstate, query, jstate);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed);
|
||||
}
|
||||
#endif
|
||||
|
@ -409,8 +418,10 @@ static void
|
|||
pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_post_parse_analyze(pstate, query);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed);
|
||||
}
|
||||
#endif
|
||||
|
@ -477,8 +488,10 @@ static void
|
|||
pgss_ExecutorStart_benchmark(QueryDesc *queryDesc, int eflags)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_ExecutorStart(queryDesc, eflags);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_EXECUTORSTART, elapsed);
|
||||
}
|
||||
#endif
|
||||
|
@ -544,8 +557,10 @@ pgss_ExecutorRun_benchmark(QueryDesc *queryDesc, ScanDirection direction, uint64
|
|||
bool execute_once)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_ExecutorRun(queryDesc, direction, count, execute_once);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_EXECUTORUN, elapsed);
|
||||
}
|
||||
#endif
|
||||
|
@ -585,8 +600,10 @@ static void
|
|||
pgss_ExecutorFinish_benchmark(QueryDesc *queryDesc)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_ExecutorFinish(queryDesc);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_EXECUTORFINISH, elapsed);
|
||||
}
|
||||
#endif
|
||||
|
@ -639,8 +656,10 @@ static void
|
|||
pgss_ExecutorEnd_benchmark(QueryDesc *queryDesc)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_ExecutorEnd(queryDesc);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_EXECUTOREND, elapsed);
|
||||
}
|
||||
#endif
|
||||
|
@ -660,6 +679,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
|
|||
if (queryDesc->operation == CMD_SELECT && PGSM_QUERY_PLAN)
|
||||
{
|
||||
MemoryContext mct = MemoryContextSwitchTo(TopMemoryContext);
|
||||
|
||||
plan_info.plan_len = snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc));
|
||||
plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char *) plan_info.plan_text, plan_info.plan_len, 0));
|
||||
plan_ptr = &plan_info;
|
||||
|
@ -711,8 +731,10 @@ pgss_ExecutorCheckPerms_benchmark(List *rt, bool abort)
|
|||
{
|
||||
bool ret;
|
||||
double start_time = (double) clock();
|
||||
|
||||
ret = pgss_ExecutorCheckPerms(rt, abort);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_EXECUTORCHECKPERMS, elapsed);
|
||||
return ret;
|
||||
}
|
||||
|
@ -731,12 +753,14 @@ pgss_ExecutorCheckPerms(List *rt, bool abort)
|
|||
foreach(lr, rt)
|
||||
{
|
||||
RangeTblEntry *rte = lfirst(lr);
|
||||
|
||||
if (rte->rtekind != RTE_RELATION)
|
||||
continue;
|
||||
|
||||
if (i < REL_LST)
|
||||
{
|
||||
bool found = false;
|
||||
|
||||
for (j = 0; j < i; j++)
|
||||
{
|
||||
if (list_oid[j] == rte->relid)
|
||||
|
@ -747,6 +771,7 @@ pgss_ExecutorCheckPerms(List *rt, bool abort)
|
|||
{
|
||||
char *namespace_name;
|
||||
char *relation_name;
|
||||
|
||||
list_oid[j] = rte->relid;
|
||||
namespace_name = get_namespace_name(get_rel_namespace(rte->relid));
|
||||
relation_name = get_rel_name(rte->relid);
|
||||
|
@ -772,8 +797,10 @@ pgss_planner_hook_benchmark(Query *parse, const char *query_string, int cursorOp
|
|||
{
|
||||
PlannedStmt *ret;
|
||||
double start_time = (double) clock();
|
||||
|
||||
ret = pgss_planner_hook(parse, query_string, cursorOptions, boundParams);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_PLANNER_HOOK, elapsed);
|
||||
return ret;
|
||||
}
|
||||
|
@ -819,11 +846,11 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par
|
|||
PG_TRY();
|
||||
{
|
||||
/*
|
||||
* If there is a previous installed hook, then assume it's going to call
|
||||
* standard_planner() function, otherwise we call the function here.
|
||||
* This is to avoid calling standard_planner() function twice, since it
|
||||
* modifies the first argument (Query *), the second call would trigger an
|
||||
* assertion failure.
|
||||
* If there is a previous installed hook, then assume it's going
|
||||
* to call standard_planner() function, otherwise we call the
|
||||
* function here. This is to avoid calling standard_planner()
|
||||
* function twice, since it modifies the first argument (Query *),
|
||||
* the second call would trigger an assertion failure.
|
||||
*/
|
||||
if (planner_hook_next)
|
||||
result = planner_hook_next(parse, query_string, cursorOptions, boundParams);
|
||||
|
@ -864,11 +891,11 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par
|
|||
else
|
||||
{
|
||||
/*
|
||||
* If there is a previous installed hook, then assume it's going to call
|
||||
* standard_planner() function, otherwise we call the function here.
|
||||
* This is to avoid calling standard_planner() function twice, since it
|
||||
* modifies the first argument (Query *), the second call would trigger an
|
||||
* assertion failure.
|
||||
* If there is a previous installed hook, then assume it's going to
|
||||
* call standard_planner() function, otherwise we call the function
|
||||
* here. This is to avoid calling standard_planner() function twice,
|
||||
* since it modifies the first argument (Query *), the second call
|
||||
* would trigger an assertion failure.
|
||||
*/
|
||||
if (planner_hook_next)
|
||||
result = planner_hook_next(parse, query_string, cursorOptions, boundParams);
|
||||
|
@ -894,12 +921,15 @@ pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString,
|
|||
QueryCompletion *qc)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed);
|
||||
}
|
||||
#endif
|
||||
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
|
||||
static void
|
||||
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
|
||||
bool readOnlyTree,
|
||||
ProcessUtilityContext context,
|
||||
ParamListInfo params, QueryEnvironment *queryEnv,
|
||||
|
@ -916,12 +946,15 @@ pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString,
|
|||
QueryCompletion *qc)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed);
|
||||
}
|
||||
#endif
|
||||
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
|
||||
static void
|
||||
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
|
||||
ProcessUtilityContext context,
|
||||
ParamListInfo params, QueryEnvironment *queryEnv,
|
||||
DestReceiver *dest,
|
||||
|
@ -937,12 +970,15 @@ pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString,
|
|||
char *completionTag)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed);
|
||||
}
|
||||
#endif
|
||||
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
|
||||
static void
|
||||
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
|
||||
ProcessUtilityContext context, ParamListInfo params,
|
||||
QueryEnvironment *queryEnv,
|
||||
DestReceiver *dest,
|
||||
|
@ -1262,6 +1298,7 @@ pgss_update_entry(pgssEntry *entry,
|
|||
/* volatile block */
|
||||
{
|
||||
volatile pgssEntry *e = (volatile pgssEntry *) entry;
|
||||
|
||||
SpinLockAcquire(&e->mutex);
|
||||
/* Start collecting data for next bucket and reset all counters */
|
||||
if (reset)
|
||||
|
@ -1290,8 +1327,10 @@ pgss_update_entry(pgssEntry *entry,
|
|||
e->counters.plantime.sum_var_time += (total_time - old_mean) * (total_time - e->counters.plantime.mean_time);
|
||||
|
||||
/* calculate min and max time */
|
||||
if (e->counters.plantime.min_time > total_time) e->counters.plantime.min_time = total_time;
|
||||
if (e->counters.plantime.max_time < total_time) e->counters.plantime.max_time = total_time;
|
||||
if (e->counters.plantime.min_time > total_time)
|
||||
e->counters.plantime.min_time = total_time;
|
||||
if (e->counters.plantime.max_time < total_time)
|
||||
e->counters.plantime.max_time = total_time;
|
||||
}
|
||||
else if (kind == PGSS_FINISHED)
|
||||
{
|
||||
|
@ -1313,8 +1352,10 @@ pgss_update_entry(pgssEntry *entry,
|
|||
e->counters.time.sum_var_time += (total_time - old_mean) * (total_time - e->counters.time.mean_time);
|
||||
|
||||
/* calculate min and max time */
|
||||
if (e->counters.time.min_time > total_time) e->counters.time.min_time = total_time;
|
||||
if (e->counters.time.max_time < total_time) e->counters.time.max_time = total_time;
|
||||
if (e->counters.time.min_time > total_time)
|
||||
e->counters.time.min_time = total_time;
|
||||
if (e->counters.time.max_time < total_time)
|
||||
e->counters.time.max_time = total_time;
|
||||
|
||||
index = get_histogram_bucket(total_time);
|
||||
e->counters.resp_calls[index]++;
|
||||
|
@ -1456,6 +1497,7 @@ pgss_store(uint64 queryid,
|
|||
return;
|
||||
|
||||
#if PG_VERSION_NUM >= 140000
|
||||
|
||||
/*
|
||||
* Nothing to do if compute_query_id isn't enabled and no other module
|
||||
* computed a query identifier.
|
||||
|
@ -1467,12 +1509,14 @@ pgss_store(uint64 queryid,
|
|||
query = CleanQuerytext(query, &query_location, &query_len);
|
||||
|
||||
#if PG_VERSION_NUM < 140000
|
||||
|
||||
/*
|
||||
* For utility statements, we just hash the query string to get an ID.
|
||||
*/
|
||||
if (queryid == UINT64CONST(0))
|
||||
{
|
||||
queryid = pgss_hash_string(query, query_len);
|
||||
|
||||
/*
|
||||
* If we are unlucky enough to get a hash of zero(invalid), use
|
||||
* queryID as 2 instead, queryID 1 is already in use for normal
|
||||
|
@ -1487,6 +1531,7 @@ pgss_store(uint64 queryid,
|
|||
if (kind == PGSS_ERROR)
|
||||
{
|
||||
int sec_ctx;
|
||||
|
||||
GetUserIdAndSecContext((Oid *) &userid, &sec_ctx);
|
||||
}
|
||||
else
|
||||
|
@ -1599,10 +1644,11 @@ pgss_store(uint64 queryid,
|
|||
elog(DEBUG1, "pgss_store: insufficient shared space for query.");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Save current query buffer length, if we fail to add a new
|
||||
* new entry to the hash table then we must restore the
|
||||
* original length.
|
||||
* Save current query buffer length, if we fail to add a new new
|
||||
* entry to the hash table then we must restore the original
|
||||
* length.
|
||||
*/
|
||||
memcpy(&prev_qbuf_len, pgss_qbuf, sizeof(prev_qbuf_len));
|
||||
}
|
||||
|
@ -1647,6 +1693,7 @@ pgss_store(uint64 queryid,
|
|||
if (norm_query)
|
||||
pfree(norm_query);
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset all statement statistics.
|
||||
*/
|
||||
|
@ -1654,6 +1701,7 @@ Datum
|
|||
pg_stat_monitor_reset(PG_FUNCTION_ARGS)
|
||||
{
|
||||
pgssSharedState *pgss = pgsm_get_ss();
|
||||
|
||||
/* Safety check... */
|
||||
if (!IsSystemInitialized())
|
||||
ereport(ERROR,
|
||||
|
@ -1668,6 +1716,7 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS)
|
|||
#ifdef BENCHMARK
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = STATS_START; i < STATS_END; ++i)
|
||||
{
|
||||
pg_hook_stats[i].min_time = 0;
|
||||
|
@ -1692,7 +1741,8 @@ static bool
|
|||
IsBucketValid(uint64 bucketid)
|
||||
{
|
||||
struct tm tm;
|
||||
time_t bucket_t,current_t;
|
||||
time_t bucket_t,
|
||||
current_t;
|
||||
double diff_t;
|
||||
pgssSharedState *pgss = pgsm_get_ss();
|
||||
|
||||
|
@ -1789,6 +1839,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
|
|||
if (read_query(pgss_qbuf, queryid, query_txt, entry->query_pos) == 0)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = read_query_buffer(bucketid, queryid, query_txt, entry->query_pos);
|
||||
if (rc != 1)
|
||||
snprintf(query_txt, 32, "%s", "<insufficient disk/shared space>");
|
||||
|
@ -1797,12 +1848,16 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
|
|||
/* copy counters to a local variable to keep locking time short */
|
||||
{
|
||||
volatile pgssEntry *e = (volatile pgssEntry *) entry;
|
||||
|
||||
SpinLockAcquire(&e->mutex);
|
||||
tmp = e->counters;
|
||||
SpinLockRelease(&e->mutex);
|
||||
}
|
||||
|
||||
/* In case that query plan is enabled, there is no need to show 0 planid query */
|
||||
/*
|
||||
* In case that query plan is enabled, there is no need to show 0
|
||||
* planid query
|
||||
*/
|
||||
if (tmp.info.cmd_type == CMD_SELECT && PGSM_QUERY_PLAN && planid == 0)
|
||||
continue;
|
||||
|
||||
|
@ -1821,6 +1876,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
|
|||
if (read_query(pgss_qbuf, tmp.info.parentid, parent_query_txt, 0) == 0)
|
||||
{
|
||||
int rc = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt, 0);
|
||||
|
||||
if (rc != 1)
|
||||
snprintf(parent_query_txt, 32, "%s", "<insufficient disk/shared space>");
|
||||
}
|
||||
|
@ -1835,9 +1891,8 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
|
|||
values[i++] = ObjectIdGetDatum(dbid);
|
||||
|
||||
/*
|
||||
* ip address at column number 3,
|
||||
* Superusers or members of pg_read_all_stats members
|
||||
* are allowed
|
||||
* ip address at column number 3, Superusers or members of
|
||||
* pg_read_all_stats members are allowed
|
||||
*/
|
||||
if (is_allowed_role || userid == GetUserId())
|
||||
values[i++] = Int64GetDatumFast(ip);
|
||||
|
@ -1921,7 +1976,10 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo,
|
|||
char *tmp_str = palloc0(1024);
|
||||
bool first = true;
|
||||
|
||||
/* Need to calculate the actual size, and avoid unnessary memory usage */
|
||||
/*
|
||||
* Need to calculate the actual size, and avoid unnessary memory
|
||||
* usage
|
||||
*/
|
||||
for (j = 0; j < tmp.info.num_relations; j++)
|
||||
{
|
||||
if (first)
|
||||
|
@ -2101,17 +2159,17 @@ get_next_wbucket(pgssSharedState *pgss)
|
|||
/*
|
||||
* If current bucket expired we loop attempting to update prev_bucket_sec.
|
||||
*
|
||||
* pg_atomic_compare_exchange_u64 may fail in two possible ways:
|
||||
* 1. Another thread/process updated the variable before us.
|
||||
* 2. A spurious failure / hardware event.
|
||||
* pg_atomic_compare_exchange_u64 may fail in two possible ways: 1.
|
||||
* Another thread/process updated the variable before us. 2. A spurious
|
||||
* failure / hardware event.
|
||||
*
|
||||
* In both failure cases we read prev_bucket_sec from memory again, if it was
|
||||
* a spurious failure then the value of prev_bucket_sec must be the same as
|
||||
* before, which will cause the while loop to execute again.
|
||||
* In both failure cases we read prev_bucket_sec from memory again, if it
|
||||
* was a spurious failure then the value of prev_bucket_sec must be the
|
||||
* same as before, which will cause the while loop to execute again.
|
||||
*
|
||||
* If another thread updated prev_bucket_sec, then its current value will
|
||||
* definitely make the while condition to fail, we can stop the loop as another
|
||||
* thread has already updated prev_bucket_sec.
|
||||
* definitely make the while condition to fail, we can stop the loop as
|
||||
* another thread has already updated prev_bucket_sec.
|
||||
*/
|
||||
if ((current_sec - current_bucket_sec) < (uint64)PGSM_BUCKET_TIME)
|
||||
{
|
||||
|
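The re-wrapped comment in the hunk above describes a compare-and-swap retry loop on prev_bucket_sec. A minimal standalone model of that retry pattern, using C11 atomics rather than the pg_atomic_compare_exchange_u64() call used by the real code, is sketched here with hypothetical names.

/* Minimal model of the CAS retry loop described above; not the actual
 * get_next_wbucket() code, which uses PostgreSQL's pg_atomic_* API. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_bucket_sec = 100;

static void
advance_bucket(uint64_t current_sec, uint64_t bucket_time)
{
	uint64_t	observed = atomic_load(&prev_bucket_sec);

	/* Keep retrying while the bucket really has expired and the CAS keeps
	 * failing (another updater won, or the CAS failed spuriously). On
	 * failure, observed is reloaded with the current value, so the expiry
	 * check is re-evaluated exactly as the comment describes. */
	while (current_sec - observed >= bucket_time &&
		   !atomic_compare_exchange_weak(&prev_bucket_sec, &observed, current_sec))
	{
		/* loop */
	}
}

int
main(void)
{
	advance_bucket(200, 60);
	printf("prev_bucket_sec = %llu\n",
		   (unsigned long long) atomic_load(&prev_bucket_sec));
	return 0;
}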
@ -2143,9 +2201,9 @@ get_next_wbucket(pgssSharedState *pgss)
|
|||
if (pgss->n_bucket_cycles >= PGSM_MAX_BUCKETS)
|
||||
{
|
||||
/*
|
||||
* A full rotation of PGSM_MAX_BUCKETS buckets happened since
|
||||
* we detected a query buffer overflow.
|
||||
* Reset overflow state and remove the dump file.
|
||||
* A full rotation of PGSM_MAX_BUCKETS buckets happened since we
|
||||
* detected a query buffer overflow. Reset overflow state and
|
||||
* remove the dump file.
|
||||
*/
|
||||
pgss->overflow = false;
|
||||
pgss->n_bucket_cycles = 0;
|
||||
|
@ -2801,6 +2859,7 @@ JumbleExpr(JumbleState *jstate, Node *node)
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Record location of constant within query string of query tree
|
||||
* that is currently being walked.
|
||||
|
@ -3242,8 +3301,9 @@ SaveQueryText(uint64 bucketid,
|
|||
}
|
||||
|
||||
/*
|
||||
* If the query buffer is empty, there is nothing to dump, this also
|
||||
* means that the current query length exceeds MAX_QUERY_BUF.
|
||||
* If the query buffer is empty, there is nothing to dump,
|
||||
* this also means that the current query length exceeds
|
||||
* MAX_QUERY_BUF.
|
||||
*/
|
||||
if (buf_len <= sizeof(uint64))
|
||||
return false;
|
||||
|
@ -3258,15 +3318,16 @@ SaveQueryText(uint64 bucketid,
|
|||
}
|
||||
|
||||
/*
|
||||
* We must check for overflow again, as the query length may
|
||||
* exceed the total size allocated to the buffer (MAX_QUERY_BUF).
|
||||
* We must check for overflow again, as the query length
|
||||
* may exceed the total size allocated to the buffer
|
||||
* (MAX_QUERY_BUF).
|
||||
*/
|
||||
if (QUERY_BUFFER_OVERFLOW(buf_len, query_len))
|
||||
{
|
||||
/*
|
||||
* If we successfully dumped the query buffer to disk, then
|
||||
* reset the buffer, otherwise we could end up dumping the
|
||||
* same buffer again.
|
||||
* If we successfully dumped the query buffer to disk,
|
||||
* then reset the buffer, otherwise we could end up
|
||||
* dumping the same buffer again.
|
||||
*/
|
||||
if (dump_ok)
|
||||
*(uint64 *) buf = 0;
|
||||
|
@ -3368,6 +3429,7 @@ pg_stat_monitor_settings(PG_FUNCTION_ARGS)
|
|||
case PGC_INT:
|
||||
{
|
||||
char value[32];
|
||||
|
||||
sprintf(value, "%d", conf->guc_variable);
|
||||
values[j++] = CStringGetTextDatum(value);
|
||||
|
||||
|
@ -3402,6 +3464,7 @@ pg_stat_monitor_settings(PG_FUNCTION_ARGS)
|
|||
if (conf->type == PGC_ENUM)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
strcat(options, conf->guc_options[0]);
|
||||
for (i = 1; i < conf->n_options; ++i)
|
||||
{
|
||||
|
@ -3470,6 +3533,7 @@ pg_stat_monitor_hook_stats(PG_FUNCTION_ARGS)
|
|||
Datum values[5];
|
||||
bool nulls[5];
|
||||
int j = 0;
|
||||
|
||||
memset(values, 0, sizeof(values));
|
||||
memset(nulls, 0, sizeof(nulls));
|
||||
|
||||
|
@ -3498,8 +3562,10 @@ static void
|
|||
pgsm_emit_log_hook_benchmark(ErrorData *edata)
|
||||
{
|
||||
double start_time = (double) clock();
|
||||
|
||||
pgsm_emit_log_hook(edata);
|
||||
double elapsed = ((double) clock() - start_time) / CLOCKS_PER_SEC;
|
||||
|
||||
update_hook_stats(STATS_PGSM_EMIT_LOG_HOOK, elapsed);
|
||||
}
|
||||
#endif
|
||||
|
@ -3559,8 +3625,10 @@ dump_queries_buffer(int bucket_id, unsigned char *buf, int buf_len)
|
|||
}
|
||||
|
||||
/* Loop until write buf_len bytes to the file. */
|
||||
do {
|
||||
do
|
||||
{
|
||||
ssize_t nwrite = write(fd, buf + off, buf_len - off);
|
||||
|
||||
if (nwrite == -1)
|
||||
{
|
||||
if (errno == EINTR && tries++ < 3)
|
||||
|
@ -3613,11 +3681,14 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos)
|
|||
{
|
||||
off = 0;
|
||||
/* read a chunck of MAX_QUERY_BUF size. */
|
||||
do {
|
||||
do
|
||||
{
|
||||
nread = read(fd, buf + off, MAX_QUERY_BUF - off);
|
||||
if (nread == -1)
|
||||
{
|
||||
if (errno == EINTR && tries++ < 3) /* read() was interrupted, attempt to read again (max attempts=3) */
|
||||
if (errno == EINTR && tries++ < 3) /* read() was interrupted,
|
||||
* attempt to read again
|
||||
* (max attempts=3) */
|
||||
continue;
|
||||
|
||||
goto exit;
|
||||
|
@ -3643,9 +3714,11 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos)
|
|||
}
|
||||
}
|
||||
else
|
||||
|
||||
/*
|
||||
* Either done=true or file has a size not multiple of MAX_QUERY_BUF.
|
||||
* It is safe to assume that the file was truncated or corrupted.
|
||||
* Either done=true or file has a size not multiple of
|
||||
* MAX_QUERY_BUF. It is safe to assume that the file was truncated
|
||||
* or corrupted.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
@ -3676,6 +3749,7 @@ time_diff(struct timeval end, struct timeval start)
|
|||
{
|
||||
double mstart;
|
||||
double mend;
|
||||
|
||||
mend = ((double) end.tv_sec * 1000.0 + (double) end.tv_usec / 1000.0);
|
||||
mstart = ((double) start.tv_sec * 1000.0 + (double) start.tv_usec / 1000.0);
|
||||
return mend - mstart;
|
||||
|
@ -3719,6 +3793,7 @@ get_histogram_bucket(double q_time)
|
|||
{
|
||||
int64 b_start = (index == 1) ? 0 : exp(bucket_size * (index - 1));
|
||||
int64 b_end = exp(bucket_size * index);
|
||||
|
||||
if ((index == 1 && q_time < b_start)
|
||||
|| (q_time >= b_start && q_time <= b_end)
|
||||
|| (index == b_count && q_time > b_end))
|
||||
|
@ -3750,6 +3825,7 @@ get_histogram_timings(PG_FUNCTION_ARGS)
|
|||
{
|
||||
int64 b_start = (index == 1) ? 0 : exp(bucket_size * (index - 1));
|
||||
int64 b_end = exp(bucket_size * index);
|
||||
|
||||
if (first)
|
||||
{
|
||||
snprintf(text_str, MAX_STRING_LEN, "(%ld - %ld)}", b_start, b_end);
|
||||
|
@ -3771,7 +3847,8 @@ extract_query_comments(const char *query, char *comments, size_t max_len)
|
|||
int rc;
|
||||
size_t nmatch = 1;
|
||||
regmatch_t pmatch;
|
||||
regoff_t comment_len, total_len = 0;
|
||||
regoff_t comment_len,
|
||||
total_len = 0;
|
||||
const char *s = query;
|
||||
|
||||
while (total_len < max_len)
|
||||
|
@ -3783,7 +3860,8 @@ extract_query_comments(const char *query, char *comments, size_t max_len)
|
|||
comment_len = pmatch.rm_eo - pmatch.rm_so;
|
||||
|
||||
if (total_len + comment_len > max_len)
|
||||
break; /* TODO: log error in error view, insufficient space for comment. */
|
||||
break; /* TODO: log error in error view, insufficient
|
||||
* space for comment. */
|
||||
|
||||
total_len += comment_len;
|
||||
|
||||
|
@ -3791,7 +3869,8 @@ extract_query_comments(const char *query, char *comments, size_t max_len)
|
|||
if (s != query)
|
||||
{
|
||||
if (total_len + 2 > max_len)
|
||||
break; /* TODO: log error in error view, insufficient space for ", " + comment. */
|
||||
break; /* TODO: log error in error view, insufficient
|
||||
* space for ", " + comment. */
|
||||
|
||||
memcpy(comments, ", ", 2);
|
||||
comments += 2;
|
||||
|
@ -3825,17 +3904,20 @@ get_query_id(JumbleState *jstate, Query *query)
|
|||
}
|
||||
#endif
|
||||
|
||||
static uint64 djb2_hash(unsigned char *str, size_t len)
|
||||
static uint64
|
||||
djb2_hash(unsigned char *str, size_t len)
|
||||
{
|
||||
uint64 hash = 5381LLU;
|
||||
|
||||
while (len--)
|
||||
hash = ((hash << 5) + hash) ^ *str++; // hash(i - 1) * 33 ^ str[i]
|
||||
hash = ((hash << 5) + hash) ^ *str++;
|
||||
/* hash(i - 1) * 33 ^ str[i] */
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
static uint64 djb2_hash_str(unsigned char *str, int *out_len)
|
||||
static uint64
|
||||
djb2_hash_str(unsigned char *str, int *out_len)
|
||||
{
|
||||
uint64 hash = 5381LLU;
|
||||
unsigned char *start = str;
|
||||
|
@ -3843,7 +3925,8 @@ static uint64 djb2_hash_str(unsigned char *str, int *out_len)
|
|||
|
||||
while ((c = *str) != '\0')
|
||||
{
|
||||
hash = ((hash << 5) + hash) ^ c; // hash(i - 1) * 33 ^ str[i]
|
||||
hash = ((hash << 5) + hash) ^ c;
|
||||
/* hash(i - 1) * 33 ^ str[i] */
|
||||
++str;
|
||||
}
|
||||
|
||||
|
@ -3853,9 +3936,11 @@ static uint64 djb2_hash_str(unsigned char *str, int *out_len)
|
|||
}
|
||||
|
||||
#ifdef BENCHMARK
|
||||
void init_hook_stats(void)
|
||||
void
|
||||
init_hook_stats(void)
|
||||
{
|
||||
bool found = false;
|
||||
|
||||
pg_hook_stats = ShmemInitStruct("pg_stat_monitor_hook_stats", HOOK_STATS_SIZE, &found);
|
||||
if (!found)
|
||||
{
|
||||
|
@ -3878,11 +3963,13 @@ void init_hook_stats(void)
|
|||
}
|
||||
}
|
||||
|
||||
void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed)
|
||||
void
|
||||
update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed)
|
||||
{
|
||||
Assert(hook_id > STATS_START && hook_id < STATS_END);
|
||||
|
||||
struct pg_hook_stats_t *p = &pg_hook_stats[hook_id];
|
||||
|
||||
if (time_elapsed < p->min_time)
|
||||
p->min_time = time_elapsed;
|
||||
|
||||
|
|
|
@ -104,7 +104,8 @@
|
|||
#define MAX_ENUM_OPTIONS 6
|
||||
typedef struct GucVariables
|
||||
{
|
||||
enum config_type type; /* PGC_BOOL, PGC_INT, PGC_REAL, PGC_STRING, PGC_ENUM */
|
||||
enum config_type type; /* PGC_BOOL, PGC_INT, PGC_REAL, PGC_STRING,
|
||||
* PGC_ENUM */
|
||||
int guc_variable;
|
||||
char guc_name[TEXT_LEN];
|
||||
char guc_desc[TEXT_LEN];
|
||||
|
@ -214,12 +215,15 @@ typedef struct pgssHashKey
|
|||
typedef struct QueryInfo
|
||||
{
|
||||
uint64 parentid; /* parent queryid of current query */
|
||||
int64 type; /* type of query, options are query, info, warning, error, fatal */
|
||||
int64 type; /* type of query, options are query, info,
|
||||
* warning, error, fatal */
|
||||
char application_name[APPLICATIONNAME_LEN];
|
||||
char comments[COMMENTS_LEN];
|
||||
char relations[REL_LST][REL_LEN]; /* List of relation involved in the query */
|
||||
char relations[REL_LST][REL_LEN]; /* List of relation involved
|
||||
* in the query */
|
||||
int num_relations; /* Number of relation in the query */
|
||||
CmdType cmd_type; /* query command type SELECT/UPDATE/DELETE/INSERT */
|
||||
CmdType cmd_type; /* query command type
|
||||
* SELECT/UPDATE/DELETE/INSERT */
|
||||
} QueryInfo;
|
||||
|
||||
typedef struct ErrorInfo
|
||||
|
@ -281,7 +285,8 @@ typedef struct Counters
|
|||
SysInfo sysinfo;
|
||||
ErrorInfo error;
|
||||
Wal_Usage walusage;
|
||||
int resp_calls[MAX_RESPONSE_BUCKET]; /* execution time's in msec */
|
||||
int resp_calls[MAX_RESPONSE_BUCKET]; /* execution time's in
|
||||
* msec */
|
||||
uint64 state; /* query state */
|
||||
} Counters;
|
||||
|
||||
|
@ -312,16 +317,19 @@ typedef struct pgssSharedState
|
|||
pg_atomic_uint64 current_wbucket;
|
||||
pg_atomic_uint64 prev_bucket_sec;
|
||||
uint64 bucket_entry[MAX_BUCKETS];
|
||||
char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */
|
||||
LWLock *errors_lock; /* protects errors hashtable search/modification */
|
||||
char bucket_start_time[MAX_BUCKETS][60]; /* start time of the
|
||||
* bucket */
|
||||
LWLock *errors_lock; /* protects errors hashtable
|
||||
* search/modification */
|
||||
|
||||
/*
|
||||
* These variables are used when pgsm_overflow_target is ON.
|
||||
*
|
||||
* overflow is set to true when the query buffer overflows.
|
||||
*
|
||||
* n_bucket_cycles counts the number of times we changed bucket
|
||||
* since the query buffer overflowed. When it reaches pgsm_max_buckets
|
||||
* we remove the dump file, also reset the counter.
|
||||
* n_bucket_cycles counts the number of times we changed bucket since the
|
||||
* query buffer overflowed. When it reaches pgsm_max_buckets we remove the
|
||||
* dump file, also reset the counter.
|
||||
*
|
||||
* This allows us to avoid having a large file on disk that would also
|
||||
* slowdown queries to the pg_stat_monitor view.
|
||||
|
@ -350,6 +358,7 @@ typedef struct LocationLen
|
|||
int location; /* start offset in query text */
|
||||
int length; /* length in bytes, or -1 to ignore */
|
||||
} LocationLen;
|
||||
|
||||
/*
|
||||
* Working state for computing a query jumble and producing a normalized
|
||||
* query string
|
||||
|
@ -414,8 +423,10 @@ void set_qbuf(unsigned char *);
|
|||
|
||||
/* hash_query.c */
|
||||
void pgss_startup(void);
|
||||
|
||||
/*---- GUC variables ----*/
|
||||
typedef enum {
|
||||
typedef enum
|
||||
{
|
||||
PSGM_TRACK_NONE = 0, /* track no statements */
|
||||
PGSM_TRACK_TOP, /* only top level statements */
|
||||
PGSM_TRACK_ALL /* all statements, including nested ones */
|
||||
|
@ -452,7 +463,8 @@ static const struct config_enum_entry track_options[] =
|
|||
* STATS_START and STATS_END are used only to delimit the range.
|
||||
* STATS_END is also the length of the valid items in the enum.
|
||||
*/
|
||||
enum pg_hook_stats_id {
|
||||
enum pg_hook_stats_id
|
||||
{
|
||||
STATS_START = -1,
|
||||
STATS_PGSS_POST_PARSE_ANALYZE,
|
||||
STATS_PGSS_EXECUTORSTART,
|
||||
|
@ -469,7 +481,8 @@ enum pg_hook_stats_id {
|
|||
};
|
||||
|
||||
/* Hold time to execute statistics for a hook. */
|
||||
struct pg_hook_stats_t {
|
||||
struct pg_hook_stats_t
|
||||
{
|
||||
char hook_name[64];
|
||||
double min_time;
|
||||
double max_time;
|
||||
|
|
|
@ -44,7 +44,8 @@ PG_FUNCTION_INFO_V1(pg_stat_monitor_reset_errors);
|
|||
|
||||
static HTAB *pgsm_errors_ht = NULL;
|
||||
|
||||
void psgm_errors_init(void)
|
||||
void
|
||||
psgm_errors_init(void)
|
||||
{
|
||||
HASHCTL info;
|
||||
#if PG_VERSION_NUM >= 140000
|
||||
|
@ -64,12 +65,14 @@ void psgm_errors_init(void)
|
|||
flags);
|
||||
}
|
||||
|
||||
size_t pgsm_errors_size(void)
|
||||
size_t
|
||||
pgsm_errors_size(void)
|
||||
{
|
||||
return hash_estimate_size(PSGM_ERRORS_MAX, sizeof(ErrorEntry));
|
||||
}
|
||||
|
||||
void pgsm_log(PgsmLogSeverity severity, const char *format, ...)
|
||||
void
|
||||
pgsm_log(PgsmLogSeverity severity, const char *format,...)
|
||||
{
|
||||
char key[ERROR_MSG_MAX_LEN];
|
||||
ErrorEntry *entry;
|
||||
|
@ -94,6 +97,7 @@ void pgsm_log(PgsmLogSeverity severity, const char *format, ...)
|
|||
if (!entry)
|
||||
{
|
||||
LWLockRelease(pgss->errors_lock);
|
||||
|
||||
/*
|
||||
* We're out of memory, can't track this error message.
|
||||
*/
|
||||
|
@ -204,6 +208,7 @@ pg_stat_monitor_errors(PG_FUNCTION_ARGS)
|
|||
Datum values[4];
|
||||
bool nulls[4];
|
||||
int i = 0;
|
||||
|
||||
memset(values, 0, sizeof(values));
|
||||
memset(nulls, 0, sizeof(nulls));
|
||||
|
||||
|
|