PG-456: Running pgindent to make the source PostgreSQL-compatible. (#269)

PG-456: Running pgindent to make source indentation/spacing PostgreSQL-compatible.

The PostgreSQL project runs pgindent from time to time to keep the source code compliant with the PostgreSQL style guide; it has been a long time since we last did that here. This commit fixes a large number of indentation and spacing issues.
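
For reference, pgindent is the formatting script that ships in the PostgreSQL source tree under src/tools/pgindent; it drives pg_bsd_indent with a project typedefs list, and the exact invocation differs between PostgreSQL versions, so it is not reproduced here. The fragment below is only an illustrative sketch (not code from this commit) of the kind of layout pgindent produces, the same transformations visible in the diff that follows: the return type moves to its own line, local declarations are tab-aligned, and braces sit on their own lines.

    /* Before pgindent (hypothetical input, for illustration only): */
    static int add_one(int x) {
        int result = x + 1;
        return result;
    }

    /* After pgindent (roughly): return type on its own line, tab-aligned
     * declarations, braces on their own lines. */
    static int
    add_one(int x)
    {
    	int			result = x + 1;

    	return result;
    }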

Co-authored-by: Hamid Akhtar <hamid.akhtar@gmail.com>
pull/276/head
Ibrar Ahmed 2022-06-29 00:42:40 +05:00 committed by GitHub
parent 926eade1eb
commit a9187117f9
5 changed files with 1287 additions and 1151 deletions

guc.c

@ -19,10 +19,10 @@
#include "pg_stat_monitor.h"
GucVariable conf[MAX_SETTINGS];
static void DefineIntGUC(GucVariable *conf);
static void DefineIntGUCWithCheck(GucVariable *conf, GucIntCheckHook check);
static void DefineBoolGUC(GucVariable *conf);
static void DefineEnumGUC(GucVariable *conf, const struct config_enum_entry *options);
static void DefineIntGUC(GucVariable * conf);
static void DefineIntGUCWithCheck(GucVariable * conf, GucIntCheckHook check);
static void DefineBoolGUC(GucVariable * conf);
static void DefineEnumGUC(GucVariable * conf, const struct config_enum_entry *options);
/* Check hooks to ensure histogram_min < histogram_max */
static bool check_histogram_min(int *newval, void **extra, GucSource source);
@ -34,196 +34,214 @@ static bool check_histogram_max(int *newval, void **extra, GucSource source);
void
init_guc(void)
{
int i = 0, j;
int i = 0,
j;
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_max",
.guc_desc = "Sets the maximum size of shared memory in (MB) used for statement's metadata tracked by pg_stat_monitor.",
.guc_default = 100,
.guc_min = 1,
.guc_max = 1000,
.guc_restart = true,
.guc_unit = GUC_UNIT_MB,
.guc_value = &PGSM_MAX
};
.guc_desc = "Sets the maximum size of shared memory in (MB) used for statement's metadata tracked by pg_stat_monitor.",
.guc_default = 100,
.guc_min = 1,
.guc_max = 1000,
.guc_restart = true,
.guc_unit = GUC_UNIT_MB,
.guc_value = &PGSM_MAX
};
DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_query_max_len",
.guc_desc = "Sets the maximum length of query.",
.guc_default = 2048,
.guc_min = 1024,
.guc_max = INT_MAX,
.guc_unit = 0,
.guc_restart = true,
.guc_value = &PGSM_QUERY_MAX_LEN
.guc_desc = "Sets the maximum length of query.",
.guc_default = 2048,
.guc_min = 1024,
.guc_max = INT_MAX,
.guc_unit = 0,
.guc_restart = true,
.guc_value = &PGSM_QUERY_MAX_LEN
};
DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_track_utility",
.guc_desc = "Selects whether utility commands are tracked.",
.guc_default = 1,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_TRACK_UTILITY
.guc_desc = "Selects whether utility commands are tracked.",
.guc_default = 1,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_TRACK_UTILITY
};
DefineBoolGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_normalized_query",
.guc_desc = "Selects whether save query in normalized format.",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_NORMALIZED_QUERY
.guc_desc = "Selects whether save query in normalized format.",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_NORMALIZED_QUERY
};
DefineBoolGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_max_buckets",
.guc_desc = "Sets the maximum number of buckets.",
.guc_default = 10,
.guc_min = 1,
.guc_max = 10,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_MAX_BUCKETS
.guc_desc = "Sets the maximum number of buckets.",
.guc_default = 10,
.guc_min = 1,
.guc_max = 10,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_MAX_BUCKETS
};
DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_bucket_time",
.guc_desc = "Sets the time in seconds per bucket.",
.guc_default = 60,
.guc_min = 1,
.guc_max = INT_MAX,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_BUCKET_TIME
.guc_desc = "Sets the time in seconds per bucket.",
.guc_default = 60,
.guc_min = 1,
.guc_max = INT_MAX,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_BUCKET_TIME
};
DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_histogram_min",
.guc_desc = "Sets the time in millisecond.",
.guc_default = 0,
.guc_min = 0,
.guc_max = INT_MAX,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_HISTOGRAM_MIN
.guc_desc = "Sets the time in millisecond.",
.guc_default = 0,
.guc_min = 0,
.guc_max = INT_MAX,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_HISTOGRAM_MIN
};
DefineIntGUCWithCheck(&conf[i++], check_histogram_min);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_histogram_max",
.guc_desc = "Sets the time in millisecond.",
.guc_default = 100000,
.guc_min = 10,
.guc_max = INT_MAX,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_HISTOGRAM_MAX
.guc_desc = "Sets the time in millisecond.",
.guc_default = 100000,
.guc_min = 10,
.guc_max = INT_MAX,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_HISTOGRAM_MAX
};
DefineIntGUCWithCheck(&conf[i++], check_histogram_max);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_histogram_buckets",
.guc_desc = "Sets the maximum number of histogram buckets",
.guc_default = 10,
.guc_min = 2,
.guc_max = MAX_RESPONSE_BUCKET,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_HISTOGRAM_BUCKETS
.guc_desc = "Sets the maximum number of histogram buckets",
.guc_default = 10,
.guc_min = 2,
.guc_max = MAX_RESPONSE_BUCKET,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_HISTOGRAM_BUCKETS
};
DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_query_shared_buffer",
.guc_desc = "Sets the maximum size of shared memory in (MB) used for query tracked by pg_stat_monitor.",
.guc_default = 20,
.guc_min = 1,
.guc_max = 10000,
.guc_restart = true,
.guc_unit = GUC_UNIT_MB,
.guc_value = &PGSM_QUERY_SHARED_BUFFER
.guc_desc = "Sets the maximum size of shared memory in (MB) used for query tracked by pg_stat_monitor.",
.guc_default = 20,
.guc_min = 1,
.guc_max = 10000,
.guc_restart = true,
.guc_unit = GUC_UNIT_MB,
.guc_value = &PGSM_QUERY_SHARED_BUFFER
};
DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_overflow_target",
.guc_desc = "Sets the overflow target for pg_stat_monitor",
.guc_default = 1,
.guc_min = 0,
.guc_max = 1,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_OVERFLOW_TARGET
.guc_desc = "Sets the overflow target for pg_stat_monitor",
.guc_default = 1,
.guc_min = 0,
.guc_max = 1,
.guc_restart = true,
.guc_unit = 0,
.guc_value = &PGSM_OVERFLOW_TARGET
};
DefineIntGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_enable_query_plan",
.guc_desc = "Enable/Disable query plan monitoring",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_QUERY_PLAN
.guc_desc = "Enable/Disable query plan monitoring",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_QUERY_PLAN
};
DefineBoolGUC(&conf[i++]);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_track",
.guc_desc = "Selects which statements are tracked by pg_stat_monitor.",
.n_options = 3,
.guc_default = PGSM_TRACK_TOP,
.guc_min = PSGM_TRACK_NONE,
.guc_max = PGSM_TRACK_ALL,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_TRACK
.guc_desc = "Selects which statements are tracked by pg_stat_monitor.",
.n_options = 3,
.guc_default = PGSM_TRACK_TOP,
.guc_min = PSGM_TRACK_NONE,
.guc_max = PGSM_TRACK_ALL,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_TRACK
};
for (j = 0; j < conf[i].n_options; ++j) {
for (j = 0; j < conf[i].n_options; ++j)
{
strlcpy(conf[i].guc_options[j], track_options[j].name, sizeof(conf[i].guc_options[j]));
}
DefineEnumGUC(&conf[i++], track_options);
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_extract_comments",
.guc_desc = "Enable/Disable extracting comments from queries.",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_EXTRACT_COMMENTS
.guc_desc = "Enable/Disable extracting comments from queries.",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_EXTRACT_COMMENTS
};
DefineBoolGUC(&conf[i++]);
#if PG_VERSION_NUM >= 130000
conf[i] = (GucVariable) {
conf[i] = (GucVariable)
{
.guc_name = "pg_stat_monitor.pgsm_track_planning",
.guc_desc = "Selects whether planning statistics are tracked.",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_TRACK_PLANNING
.guc_desc = "Selects whether planning statistics are tracked.",
.guc_default = 0,
.guc_min = 0,
.guc_max = 0,
.guc_restart = false,
.guc_unit = 0,
.guc_value = &PGSM_TRACK_PLANNING
};
DefineBoolGUC(&conf[i++]);
#endif
}
static void DefineIntGUCWithCheck(GucVariable *conf, GucIntCheckHook check)
static void
DefineIntGUCWithCheck(GucVariable * conf, GucIntCheckHook check)
{
conf->type = PGC_INT;
DefineCustomIntVariable(conf->guc_name,
@ -241,21 +259,21 @@ static void DefineIntGUCWithCheck(GucVariable *conf, GucIntCheckHook check)
}
static void
DefineIntGUC(GucVariable *conf)
DefineIntGUC(GucVariable * conf)
{
DefineIntGUCWithCheck(conf, NULL);
}
static void
DefineBoolGUC(GucVariable *conf)
DefineBoolGUC(GucVariable * conf)
{
conf->type = PGC_BOOL;
DefineCustomBoolVariable(conf->guc_name,
conf->guc_desc,
NULL,
(bool*)conf->guc_value,
conf->guc_default,
conf->guc_restart ? PGC_POSTMASTER : PGC_USERSET,
conf->guc_desc,
NULL,
(bool *) conf->guc_value,
conf->guc_default,
conf->guc_restart ? PGC_POSTMASTER : PGC_USERSET,
0,
NULL,
NULL,
@ -263,29 +281,30 @@ DefineBoolGUC(GucVariable *conf)
}
static void
DefineEnumGUC(GucVariable *conf, const struct config_enum_entry *options)
DefineEnumGUC(GucVariable * conf, const struct config_enum_entry *options)
{
conf->type = PGC_ENUM;
DefineCustomEnumVariable(conf->guc_name,
conf->guc_desc,
NULL,
conf->guc_value,
conf->guc_default,
options,
conf->guc_restart ? PGC_POSTMASTER : PGC_USERSET,
0,
NULL,
NULL,
NULL);
conf->guc_desc,
NULL,
conf->guc_value,
conf->guc_default,
options,
conf->guc_restart ? PGC_POSTMASTER : PGC_USERSET,
0,
NULL,
NULL,
NULL);
}
GucVariable*
GucVariable *
get_conf(int i)
{
return &conf[i];
}
static bool check_histogram_min(int *newval, void **extra, GucSource source)
static bool
check_histogram_min(int *newval, void **extra, GucSource source)
{
/*
* During module initialization PGSM_HISTOGRAM_MIN is initialized before
@ -294,7 +313,8 @@ static bool check_histogram_min(int *newval, void **extra, GucSource source)
return (PGSM_HISTOGRAM_MAX == 0 || *newval < PGSM_HISTOGRAM_MAX);
}
static bool check_histogram_max(int *newval, void **extra, GucSource source)
static bool
check_histogram_max(int *newval, void **extra, GucSource source)
{
return (*newval > PGSM_HISTOGRAM_MIN);
}


@ -25,10 +25,11 @@ static HTAB *pgss_hash;
static HTAB *pgss_query_hash;
static HTAB*
static HTAB *
hash_init(const char *hash_name, int key_size, int entry_size, int hash_size)
{
HASHCTL info;
HASHCTL info;
memset(&info, 0, sizeof(info));
info.keysize = key_size;
info.entrysize = entry_size;
@ -46,8 +47,8 @@ pgss_startup(void)
pgss_hash = NULL;
/*
* Create or attach to the shared memory state, including hash table
*/
* Create or attach to the shared memory state, including hash table
*/
LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
pgss = ShmemInitStruct("pg_stat_monitor", sizeof(pgssSharedState), &found);
@ -63,7 +64,7 @@ pgss_startup(void)
init_hook_stats();
#endif
set_qbuf((unsigned char *)ShmemAlloc(MAX_QUERY_BUF));
set_qbuf((unsigned char *) ShmemAlloc(MAX_QUERY_BUF));
pgss_hash = hash_init("pg_stat_monitor: bucket hashtable", sizeof(pgssHashKey), sizeof(pgssEntry), MAX_BUCKET_ENTRIES);
pgss_query_hash = hash_init("pg_stat_monitor: queryID hashtable", sizeof(uint64), sizeof(pgssQueryEntry), MAX_BUCKET_ENTRIES);
@ -77,19 +78,19 @@ pgss_startup(void)
on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
}
pgssSharedState*
pgssSharedState *
pgsm_get_ss(void)
{
return pgss;
}
HTAB*
HTAB *
pgsm_get_hash(void)
{
return pgss_hash;
}
HTAB*
HTAB *
pgsm_get_query_hash(void)
{
return pgss_query_hash;
@ -117,7 +118,7 @@ pgss_shmem_shutdown(int code, Datum arg)
Size
hash_memsize(void)
{
Size size;
Size size;
size = MAXALIGN(sizeof(pgssSharedState));
size += MAXALIGN(MAX_QUERY_BUF);
@ -130,7 +131,7 @@ hash_memsize(void)
pgssEntry *
hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding)
{
pgssEntry *entry = NULL;
pgssEntry *entry = NULL;
bool found = false;
if (hash_get_num_entries(pgss_hash) >= MAX_BUCKET_ENTRIES)
@ -164,7 +165,7 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding)
* state is PGSS_FINISHED or PGSS_FINISHED).
* - Clear query buffer for new_bucket_id.
* - If old_bucket_id != -1, move all pending hash table entries in
* old_bucket_id to the new bucket id, also move pending queries from the
* old_bucket_id to the new bucket id, also move pending queries from the
* previous query buffer (query_buffer[old_bucket_id]) to the new one
* (query_buffer[new_bucket_id]).
*
@ -174,26 +175,30 @@ void
hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer)
{
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry = NULL;
pgssEntry *entry = NULL;
/* Store pending query ids from the previous bucket. */
List *pending_entries = NIL;
ListCell *pending_entry;
List *pending_entries = NIL;
ListCell *pending_entry;
/* Iterate over the hash table. */
hash_seq_init(&hash_seq, pgss_hash);
while ((entry = hash_seq_search(&hash_seq)) != NULL)
{
/*
* Remove all entries if new_bucket_id == -1.
* Otherwise remove entry in new_bucket_id if it has finished already.
* Remove all entries if new_bucket_id == -1. Otherwise remove entry
* in new_bucket_id if it has finished already.
*/
if (new_bucket_id < 0 ||
(entry->key.bucket_id == new_bucket_id &&
(entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR)))
(entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR)))
{
if (new_bucket_id == -1) {
/* pg_stat_monitor_reset(), remove entry from query hash table too. */
if (new_bucket_id == -1)
{
/*
* pg_stat_monitor_reset(), remove entry from query hash table
* too.
*/
hash_search(pgss_query_hash, &(entry->key.queryid), HASH_REMOVE, NULL);
}
@ -201,11 +206,10 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
}
/*
* If we detect a pending query residing in the previous bucket id,
* we add it to a list of pending elements to be moved to the new
* bucket id.
* Can't update the hash table while iterating it inside this loop,
* as this may introduce all sort of problems.
* If we detect a pending query residing in the previous bucket id, we
* add it to a list of pending elements to be moved to the new bucket
* id. Can't update the hash table while iterating it inside this
* loop, as this may introduce all sort of problems.
*/
if (old_bucket_id != -1 && entry->key.bucket_id == old_bucket_id)
{
@ -213,19 +217,23 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
entry->counters.state == PGSS_PLAN ||
entry->counters.state == PGSS_EXEC)
{
pgssEntry *bkp_entry = malloc(sizeof(pgssEntry));
pgssEntry *bkp_entry = malloc(sizeof(pgssEntry));
if (!bkp_entry)
{
elog(DEBUG1, "hash_entry_dealloc: out of memory");
/*
* No memory, If the entry has calls > 1 then we change the state to finished,
* as the pending query will likely finish execution during the new bucket
* time window. The pending query will vanish in this case, can't list it
* No memory, If the entry has calls > 1 then we change
* the state to finished, as the pending query will likely
* finish execution during the new bucket time window. The
* pending query will vanish in this case, can't list it
* until it completes.
*
* If there is only one call to the query and it's pending, remove the
* entry from the previous bucket and allow it to finish in the new bucket,
* in order to avoid the query living in the old bucket forever.
* If there is only one call to the query and it's
* pending, remove the entry from the previous bucket and
* allow it to finish in the new bucket, in order to avoid
* the query living in the old bucket forever.
*/
if (entry->counters.calls.calls > 1)
entry->counters.state = PGSS_FINISHED;
@ -244,14 +252,16 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
pending_entries = lappend(pending_entries, bkp_entry);
/*
* If the entry has calls > 1 then we change the state to finished in
* the previous bucket, as the pending query will likely finish execution
* during the new bucket time window. Can't remove it from the previous bucket
* as it may have many calls and we would lose the query statistics.
* If the entry has calls > 1 then we change the state to
* finished in the previous bucket, as the pending query will
* likely finish execution during the new bucket time window.
* Can't remove it from the previous bucket as it may have
* many calls and we would lose the query statistics.
*
* If there is only one call to the query and it's pending, remove the entry
* from the previous bucket and allow it to finish in the new bucket,
* in order to avoid the query living in the old bucket forever.
* If there is only one call to the query and it's pending,
* remove the entry from the previous bucket and allow it to
* finish in the new bucket, in order to avoid the query
* living in the old bucket forever.
*/
if (entry->counters.calls.calls > 1)
entry->counters.state = PGSS_FINISHED;
@ -262,13 +272,14 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
}
/*
* Iterate over the list of pending queries in order
* to add them back to the hash table with the updated bucket id.
* Iterate over the list of pending queries in order to add them back to
* the hash table with the updated bucket id.
*/
foreach (pending_entry, pending_entries) {
bool found = false;
pgssEntry *new_entry;
pgssEntry *old_entry = (pgssEntry *) lfirst(pending_entry);
foreach(pending_entry, pending_entries)
{
bool found = false;
pgssEntry *new_entry;
pgssEntry *old_entry = (pgssEntry *) lfirst(pending_entry);
new_entry = (pgssEntry *) hash_search(pgss_hash, &old_entry->key, HASH_ENTER_NULL, &found);
if (new_entry == NULL)
@ -294,9 +305,9 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu
void
hash_entry_reset()
{
pgssSharedState *pgss = pgsm_get_ss();
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
pgssSharedState *pgss = pgsm_get_ss();
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
LWLockAcquire(pgss->lock, LW_EXCLUSIVE);

File diff suppressed because it is too large.


@ -104,27 +104,28 @@
#define MAX_ENUM_OPTIONS 6
typedef struct GucVariables
{
enum config_type type; /* PGC_BOOL, PGC_INT, PGC_REAL, PGC_STRING, PGC_ENUM */
int guc_variable;
char guc_name[TEXT_LEN];
char guc_desc[TEXT_LEN];
int guc_default;
int guc_min;
int guc_max;
int guc_unit;
int *guc_value;
bool guc_restart;
int n_options;
char guc_options[MAX_ENUM_OPTIONS][32];
} GucVariable;
enum config_type type; /* PGC_BOOL, PGC_INT, PGC_REAL, PGC_STRING,
* PGC_ENUM */
int guc_variable;
char guc_name[TEXT_LEN];
char guc_desc[TEXT_LEN];
int guc_default;
int guc_min;
int guc_max;
int guc_unit;
int *guc_value;
bool guc_restart;
int n_options;
char guc_options[MAX_ENUM_OPTIONS][32];
} GucVariable;
#if PG_VERSION_NUM < 130000
typedef struct WalUsage
{
long wal_records; /* # of WAL records produced */
long wal_fpi; /* # of WAL full page images produced */
uint64 wal_bytes; /* size of WAL records produced */
long wal_records; /* # of WAL records produced */
long wal_fpi; /* # of WAL full page images produced */
uint64 wal_bytes; /* size of WAL records produced */
} WalUsage;
#endif
@ -132,7 +133,7 @@ typedef enum OVERFLOW_TARGET
{
OVERFLOW_TARGET_NONE = 0,
OVERFLOW_TARGET_DISK
} OVERFLOW_TARGET;
} OVERFLOW_TARGET;
typedef enum pgssStoreKind
{
@ -163,19 +164,19 @@ typedef enum AGG_KEY
AGG_KEY_DATABASE = 0,
AGG_KEY_USER,
AGG_KEY_HOST
} AGG_KEY;
} AGG_KEY;
#define MAX_QUERY_LEN 1024
/* shared memory storage for the query */
typedef struct CallTime
{
double total_time; /* total execution time, in msec */
double min_time; /* minimum execution time in msec */
double max_time; /* maximum execution time in msec */
double mean_time; /* mean execution time in msec */
double sum_var_time; /* sum of variances in execution time in msec */
} CallTime;
double total_time; /* total execution time, in msec */
double min_time; /* minimum execution time in msec */
double max_time; /* maximum execution time in msec */
double mean_time; /* mean execution time in msec */
double sum_var_time; /* sum of variances in execution time in msec */
} CallTime;
/*
* Entry type for queries hash table (query ID).
@ -188,16 +189,16 @@ typedef struct CallTime
*/
typedef struct pgssQueryEntry
{
uint64 queryid; /* query identifier, also the key. */
size_t query_pos; /* query location within query buffer */
} pgssQueryEntry;
uint64 queryid; /* query identifier, also the key. */
size_t query_pos; /* query location within query buffer */
} pgssQueryEntry;
typedef struct PlanInfo
{
uint64 planid; /* plan identifier */
char plan_text[PLAN_TEXT_LEN]; /* plan text */
size_t plan_len; /* strlen(plan_text) */
} PlanInfo;
uint64 planid; /* plan identifier */
char plan_text[PLAN_TEXT_LEN]; /* plan text */
size_t plan_len; /* strlen(plan_text) */
} PlanInfo;
typedef struct pgssHashKey
{
@ -208,63 +209,66 @@ typedef struct pgssHashKey
uint64 ip; /* client ip address */
uint64 planid; /* plan identifier */
uint64 appid; /* hash of application name */
uint64 toplevel; /* query executed at top level */
uint64 toplevel; /* query executed at top level */
} pgssHashKey;
typedef struct QueryInfo
{
uint64 parentid; /* parent queryid of current query*/
int64 type; /* type of query, options are query, info, warning, error, fatal */
uint64 parentid; /* parent queryid of current query */
int64 type; /* type of query, options are query, info,
* warning, error, fatal */
char application_name[APPLICATIONNAME_LEN];
char comments[COMMENTS_LEN];
char relations[REL_LST][REL_LEN]; /* List of relation involved in the query */
int num_relations; /* Number of relation in the query */
CmdType cmd_type; /* query command type SELECT/UPDATE/DELETE/INSERT */
char relations[REL_LST][REL_LEN]; /* List of relation involved
* in the query */
int num_relations; /* Number of relation in the query */
CmdType cmd_type; /* query command type
* SELECT/UPDATE/DELETE/INSERT */
} QueryInfo;
typedef struct ErrorInfo
{
int64 elevel; /* error elevel */
char sqlcode[SQLCODE_LEN]; /* error sqlcode */
char message[ERROR_MESSAGE_LEN]; /* error message text */
} ErrorInfo;
int64 elevel; /* error elevel */
char sqlcode[SQLCODE_LEN]; /* error sqlcode */
char message[ERROR_MESSAGE_LEN]; /* error message text */
} ErrorInfo;
typedef struct Calls
{
int64 calls; /* # of times executed */
int64 rows; /* total # of retrieved or affected rows */
double usage; /* usage factor */
} Calls;
int64 calls; /* # of times executed */
int64 rows; /* total # of retrieved or affected rows */
double usage; /* usage factor */
} Calls;
typedef struct Blocks
{
int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
int64 shared_blks_dirtied; /* # of shared disk blocks dirtied */
int64 shared_blks_written; /* # of shared disk blocks written */
int64 local_blks_hit; /* # of local buffer hits */
int64 local_blks_read; /* # of local disk blocks read */
int64 local_blks_dirtied; /* # of local disk blocks dirtied */
int64 local_blks_written; /* # of local disk blocks written */
int64 temp_blks_read; /* # of temp blocks read */
int64 temp_blks_written; /* # of temp blocks written */
double blk_read_time; /* time spent reading, in msec */
double blk_write_time; /* time spent writing, in msec */
} Blocks;
int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
int64 shared_blks_dirtied; /* # of shared disk blocks dirtied */
int64 shared_blks_written; /* # of shared disk blocks written */
int64 local_blks_hit; /* # of local buffer hits */
int64 local_blks_read; /* # of local disk blocks read */
int64 local_blks_dirtied; /* # of local disk blocks dirtied */
int64 local_blks_written; /* # of local disk blocks written */
int64 temp_blks_read; /* # of temp blocks read */
int64 temp_blks_written; /* # of temp blocks written */
double blk_read_time; /* time spent reading, in msec */
double blk_write_time; /* time spent writing, in msec */
} Blocks;
typedef struct SysInfo
{
float utime; /* user cpu time */
float stime; /* system cpu time */
} SysInfo;
float utime; /* user cpu time */
float stime; /* system cpu time */
} SysInfo;
typedef struct Wal_Usage
{
int64 wal_records; /* # of WAL records generated */
int64 wal_fpi; /* # of WAL full page images generated */
uint64 wal_bytes; /* total amount of WAL bytes generated */
} Wal_Usage;
} Wal_Usage;
typedef struct Counters
{
@ -275,14 +279,15 @@ typedef struct Counters
Calls plancalls;
CallTime plantime;
PlanInfo planinfo;
PlanInfo planinfo;
Blocks blocks;
SysInfo sysinfo;
ErrorInfo error;
Wal_Usage walusage;
int resp_calls[MAX_RESPONSE_BUCKET]; /* execution time's in msec */
uint64 state; /* query state */
ErrorInfo error;
Wal_Usage walusage;
int resp_calls[MAX_RESPONSE_BUCKET]; /* execution time's in
* msec */
uint64 state; /* query state */
} Counters;
/* Some global structure to get the cpu usage, really don't like the idea of global variable */
@ -292,11 +297,11 @@ typedef struct Counters
*/
typedef struct pgssEntry
{
pgssHashKey key; /* hash key of entry - MUST BE FIRST */
Counters counters; /* the statistics for this query */
int encoding; /* query text encoding */
slock_t mutex; /* protects the counters only */
size_t query_pos; /* query location within query buffer */
pgssHashKey key; /* hash key of entry - MUST BE FIRST */
Counters counters; /* the statistics for this query */
int encoding; /* query text encoding */
slock_t mutex; /* protects the counters only */
size_t query_pos; /* query location within query buffer */
} pgssEntry;
/*
@ -304,30 +309,33 @@ typedef struct pgssEntry
*/
typedef struct pgssSharedState
{
LWLock *lock; /* protects hashtable search/modification */
double cur_median_usage; /* current median usage in hashtable */
slock_t mutex; /* protects following fields only: */
Size extent; /* current extent of query file */
int64 n_writers; /* number of active writers to query file */
pg_atomic_uint64 current_wbucket;
pg_atomic_uint64 prev_bucket_sec;
uint64 bucket_entry[MAX_BUCKETS];
char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */
LWLock *errors_lock; /* protects errors hashtable search/modification */
LWLock *lock; /* protects hashtable search/modification */
double cur_median_usage; /* current median usage in hashtable */
slock_t mutex; /* protects following fields only: */
Size extent; /* current extent of query file */
int64 n_writers; /* number of active writers to query file */
pg_atomic_uint64 current_wbucket;
pg_atomic_uint64 prev_bucket_sec;
uint64 bucket_entry[MAX_BUCKETS];
char bucket_start_time[MAX_BUCKETS][60]; /* start time of the
* bucket */
LWLock *errors_lock; /* protects errors hashtable
* search/modification */
/*
* These variables are used when pgsm_overflow_target is ON.
*
* overflow is set to true when the query buffer overflows.
*
* n_bucket_cycles counts the number of times we changed bucket
* since the query buffer overflowed. When it reaches pgsm_max_buckets
* we remove the dump file, also reset the counter.
* n_bucket_cycles counts the number of times we changed bucket since the
* query buffer overflowed. When it reaches pgsm_max_buckets we remove the
* dump file, also reset the counter.
*
* This allows us to avoid having a large file on disk that would also
* slowdown queries to the pg_stat_monitor view.
*/
bool overflow;
size_t n_bucket_cycles;
bool overflow;
size_t n_bucket_cycles;
} pgssSharedState;
#define ResetSharedState(x) \
@ -350,6 +358,7 @@ typedef struct LocationLen
int location; /* start offset in query text */
int length; /* length in bytes, or -1 to ignore */
} LocationLen;
/*
* Working state for computing a query jumble and producing a normalized
* query string
@ -378,54 +387,56 @@ typedef struct JumbleState
/* Links to shared memory state */
bool SaveQueryText(uint64 bucketid,
uint64 queryid,
unsigned char *buf,
const char *query,
uint64 query_len,
size_t *query_pos);
bool SaveQueryText(uint64 bucketid,
uint64 queryid,
unsigned char *buf,
const char *query,
uint64 query_len,
size_t *query_pos);
/* guc.c */
void init_guc(void);
void init_guc(void);
GucVariable *get_conf(int i);
/* hash_create.c */
bool IsHashInitialize(void);
void pgss_shmem_startup(void);
void pgss_shmem_shutdown(int code, Datum arg);
int pgsm_get_bucket_size(void);
pgssSharedState* pgsm_get_ss(void);
HTAB *pgsm_get_plan_hash(void);
HTAB *pgsm_get_hash(void);
HTAB *pgsm_get_query_hash(void);
HTAB *pgsm_get_plan_hash(void);
void hash_entry_reset(void);
void hash_query_entryies_reset(void);
void hash_query_entries();
void hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]);
void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer);
pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding);
Size hash_memsize(void);
bool IsHashInitialize(void);
void pgss_shmem_startup(void);
void pgss_shmem_shutdown(int code, Datum arg);
int pgsm_get_bucket_size(void);
pgssSharedState *pgsm_get_ss(void);
HTAB *pgsm_get_plan_hash(void);
HTAB *pgsm_get_hash(void);
HTAB *pgsm_get_query_hash(void);
HTAB *pgsm_get_plan_hash(void);
void hash_entry_reset(void);
void hash_query_entryies_reset(void);
void hash_query_entries();
void hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]);
void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer);
pgssEntry *hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding);
Size hash_memsize(void);
int read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos);
uint64 read_query(unsigned char *buf, uint64 queryid, char * query, size_t pos);
void pgss_startup(void);
void set_qbuf(unsigned char *);
int read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos);
uint64 read_query(unsigned char *buf, uint64 queryid, char *query, size_t pos);
void pgss_startup(void);
void set_qbuf(unsigned char *);
/* hash_query.c */
void pgss_startup(void);
void pgss_startup(void);
/*---- GUC variables ----*/
typedef enum {
PSGM_TRACK_NONE = 0, /* track no statements */
PGSM_TRACK_TOP, /* only top level statements */
PGSM_TRACK_ALL /* all statements, including nested ones */
} PGSMTrackLevel;
typedef enum
{
PSGM_TRACK_NONE = 0, /* track no statements */
PGSM_TRACK_TOP, /* only top level statements */
PGSM_TRACK_ALL /* all statements, including nested ones */
} PGSMTrackLevel;
static const struct config_enum_entry track_options[] =
{
{"none", PSGM_TRACK_NONE, false},
{"top", PGSM_TRACK_TOP, false},
{"all", PGSM_TRACK_ALL, false},
{NULL, 0, false}
{"none", PSGM_TRACK_NONE, false},
{"top", PGSM_TRACK_TOP, false},
{"all", PGSM_TRACK_ALL, false},
{NULL, 0, false}
};
#define PGSM_MAX get_conf(0)->guc_variable
@ -447,12 +458,13 @@ static const struct config_enum_entry track_options[] =
/*---- Benchmarking ----*/
#ifdef BENCHMARK
/*
/*
* These enumerator values are used as index in the hook stats array.
* STATS_START and STATS_END are used only to delimit the range.
* STATS_END is also the length of the valid items in the enum.
*/
enum pg_hook_stats_id {
enum pg_hook_stats_id
{
STATS_START = -1,
STATS_PGSS_POST_PARSE_ANALYZE,
STATS_PGSS_EXECUTORSTART,
@ -469,21 +481,22 @@ enum pg_hook_stats_id {
};
/* Hold time to execute statistics for a hook. */
struct pg_hook_stats_t {
char hook_name[64];
double min_time;
double max_time;
double total_time;
uint64 ncalls;
struct pg_hook_stats_t
{
char hook_name[64];
double min_time;
double max_time;
double total_time;
uint64 ncalls;
};
#define HOOK_STATS_SIZE MAXALIGN((size_t)STATS_END * sizeof(struct pg_hook_stats_t))
/* Allocate a pg_hook_stats_t array of size HOOK_STATS_SIZE on shared memory. */
void init_hook_stats(void);
void init_hook_stats(void);
/* Update hook time execution statistics. */
void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed);
void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed);
/*
* Macro used to declare a hook function:
@ -509,7 +522,7 @@ void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed);
*/
#define HOOK(name) name##_benchmark
#else /* #ifdef BENCHMARK */
#else /* #ifdef BENCHMARK */
#define DECLARE_HOOK(hook, ...) \
static hook(__VA_ARGS__);


@ -44,13 +44,14 @@ PG_FUNCTION_INFO_V1(pg_stat_monitor_reset_errors);
static HTAB *pgsm_errors_ht = NULL;
void psgm_errors_init(void)
void
psgm_errors_init(void)
{
HASHCTL info;
HASHCTL info;
#if PG_VERSION_NUM >= 140000
int flags = HASH_ELEM | HASH_STRINGS;
int flags = HASH_ELEM | HASH_STRINGS;
#else
int flags = HASH_ELEM | HASH_BLOBS;
int flags = HASH_ELEM | HASH_BLOBS;
#endif
@ -58,26 +59,28 @@ void psgm_errors_init(void)
info.keysize = ERROR_MSG_MAX_LEN;
info.entrysize = sizeof(ErrorEntry);
pgsm_errors_ht = ShmemInitHash("pg_stat_monitor: errors hashtable",
PSGM_ERRORS_MAX, /* initial size */
PSGM_ERRORS_MAX, /* maximum size */
&info,
flags);
PSGM_ERRORS_MAX, /* initial size */
PSGM_ERRORS_MAX, /* maximum size */
&info,
flags);
}
size_t pgsm_errors_size(void)
size_t
pgsm_errors_size(void)
{
return hash_estimate_size(PSGM_ERRORS_MAX, sizeof(ErrorEntry));
return hash_estimate_size(PSGM_ERRORS_MAX, sizeof(ErrorEntry));
}
void pgsm_log(PgsmLogSeverity severity, const char *format, ...)
void
pgsm_log(PgsmLogSeverity severity, const char *format,...)
{
char key[ERROR_MSG_MAX_LEN];
char key[ERROR_MSG_MAX_LEN];
ErrorEntry *entry;
bool found = false;
va_list ap;
int n;
bool found = false;
va_list ap;
int n;
struct timeval tv;
struct tm *lt;
struct tm *lt;
pgssSharedState *pgss;
va_start(ap, format);
@ -94,9 +97,10 @@ void pgsm_log(PgsmLogSeverity severity, const char *format, ...)
if (!entry)
{
LWLockRelease(pgss->errors_lock);
/*
* We're out of memory, can't track this error message.
*/
/*
* We're out of memory, can't track this error message.
*/
return;
}
@ -110,13 +114,13 @@ void pgsm_log(PgsmLogSeverity severity, const char *format, ...)
gettimeofday(&tv, NULL);
lt = localtime(&tv.tv_sec);
snprintf(entry->time, sizeof(entry->time),
"%04d-%02d-%02d %02d:%02d:%02d",
lt->tm_year + 1900,
lt->tm_mon + 1,
lt->tm_mday,
lt->tm_hour,
lt->tm_min,
lt->tm_sec);
"%04d-%02d-%02d %02d:%02d:%02d",
lt->tm_year + 1900,
lt->tm_mon + 1,
lt->tm_mday,
lt->tm_hour,
lt->tm_min,
lt->tm_sec);
entry->calls++;
@ -129,15 +133,15 @@ void pgsm_log(PgsmLogSeverity severity, const char *format, ...)
Datum
pg_stat_monitor_reset_errors(PG_FUNCTION_ARGS)
{
HASH_SEQ_STATUS hash_seq;
ErrorEntry *entry;
pgssSharedState *pgss = pgsm_get_ss();
HASH_SEQ_STATUS hash_seq;
ErrorEntry *entry;
pgssSharedState *pgss = pgsm_get_ss();
/* Safety check... */
if (!IsSystemInitialized())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries")));
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries")));
LWLockAcquire(pgss->errors_lock, LW_EXCLUSIVE);
@ -157,26 +161,26 @@ pg_stat_monitor_reset_errors(PG_FUNCTION_ARGS)
Datum
pg_stat_monitor_errors(PG_FUNCTION_ARGS)
{
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
Tuplestorestate *tupstore;
MemoryContext per_query_ctx;
MemoryContext oldcontext;
HASH_SEQ_STATUS hash_seq;
ErrorEntry *error_entry;
pgssSharedState *pgss = pgsm_get_ss();
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
Tuplestorestate *tupstore;
MemoryContext per_query_ctx;
MemoryContext oldcontext;
HASH_SEQ_STATUS hash_seq;
ErrorEntry *error_entry;
pgssSharedState *pgss = pgsm_get_ss();
/* Safety check... */
if (!IsSystemInitialized())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries")));
errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries")));
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("pg_stat_monitor: set-valued function called in context that cannot accept a set")));
errmsg("pg_stat_monitor: set-valued function called in context that cannot accept a set")));
/* Switch into long-lived context to construct returned data structures */
per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
@ -204,6 +208,7 @@ pg_stat_monitor_errors(PG_FUNCTION_ARGS)
Datum values[4];
bool nulls[4];
int i = 0;
memset(values, 0, sizeof(values));
memset(nulls, 0, sizeof(nulls));
@ -219,5 +224,5 @@ pg_stat_monitor_errors(PG_FUNCTION_ARGS)
/* clean up and return the tuplestore */
tuplestore_donestoring(tupstore);
return (Datum)0;
}
return (Datum) 0;
}