diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f8ee2e8..15a347a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,7 +6,7 @@ We're glad that you would like to become a Percona community member and particip You can contribute in one of the following ways: -1. Reach us on our [Forums](https://forums.percona.com/) and Discord. +1. Reach us on our [Forums](https://forums.percona.com/c/postgresql/pg-stat-monitor/69). 2. [Submit a bug report or a feature request](#submit-a-bug-report-or-a-feature-request) 3. [Submit a pull request (PR) with the code patch](#submit-a-pull-request) 4. [Contribute to documentation](#contributing-to-documentation) diff --git a/Makefile b/Makefile index ac1225d..1b2a38d 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ PGFILEDESC = "pg_stat_monitor - execution statistics of SQL statements" LDFLAGS_SL += $(filter -lm, $(LIBS)) REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/pg_stat_monitor/pg_stat_monitor.conf --inputdir=regression -REGRESS = basic version guc counters relations database top_query application_name cmd_type error state rows tags +REGRESS = basic version guc counters relations database error_insert application_name application_name_unique top_query cmd_type error rows tags histogram # Disabled because these tests require "shared_preload_libraries=pg_stat_statements", # which typical installcheck users do not have (e.g. buildfarm clients). @@ -23,19 +23,39 @@ PG_VERSION := $(shell pg_config --version | awk {'print $$1 $$2'}) MAJOR := $(shell echo $(PG_VERSION) | sed -e 's/\.[^./]*$$//') ifneq (,$(findstring PostgreSQL14,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.14.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.14.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.14.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.14.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.14.sql.in pg_stat_monitor--1.0.sql) + endif endif ifneq (,$(findstring PostgreSQL13,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.13.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.13.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.13.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.13.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.13.sql.in pg_stat_monitor--1.0.sql) + endif endif ifneq (,$(findstring PostgreSQL12,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + endif endif ifneq (,$(findstring PostgreSQL11,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + endif endif ifdef USE_PGXS diff --git a/README.md b/README.md index 4c516d8..9c93e8f 100644 --- a/README.md +++ b/README.md @@ -10,25 +10,32 @@ ## Table of Contents -* [Overview](#overview) -* [Supported versions](#supported-versions) -* [Features](#features) -* [Documentation](#documentation) -* [Supported platforms](#supported-platforms) -* [Installation 
guidelines](#installation-guidelines) -* [Configuration](#configuration) -* [Setup](#setup) -* [Building from source code](#building-from-source) -* [How to contribute](#how-to-contribute) -* [Support, discussions and forums](#support-discussions-and-forums) -* [License](#license) -* [Copyright notice](#copyright-notice) +- [pg_stat_monitor: Query Performance Monitoring Tool for PostgreSQL](#pg_stat_monitor-query-performance-monitoring-tool-for-postgresql) + - [Table of Contents](#table-of-contents) + - [Overview](#overview) + - [Supported versions](#supported-versions) + - [Features](#features) + - [Documentation](#documentation) + - [Supported platforms](#supported-platforms) + - [Installation guidelines](#installation-guidelines) + - [Installing from Percona repositories](#installing-from-percona-repositories) + - [Installing from PostgreSQL `yum` repositories](#installing-from-postgresql-yum-repositories) + - [Installing from PGXN](#installing-from-pgxn) + - [Configuration](#configuration) + - [Setup](#setup) + - [Building from source](#building-from-source) + - [Uninstall `pg_stat_monitor`](#uninstall-pg_stat_monitor) + - [How to contribute](#how-to-contribute) + - [Report a bug](#report-a-bug) + - [Support, discussions and forums](#support-discussions-and-forums) + - [License](#license) + - [Copyright notice](#copyright-notice) ## Overview **NOTE**: This is a beta release and is subject to further changes. We recommend using it in testing environments only. -The `pg_stat_monitor` is a **_Query Performance Monitoring_** tool for PostgreSQL. It attempts to provide a more holistic picture by providing much-needed query performance insights in a single view. +The `pg_stat_monitor` is a **_Query Performance Monitoring_** tool for PostgreSQL. It attempts to provide a more holistic picture by providing much-needed query performance insights in a [single view](https://github.com/percona/pg_stat_monitor/blob/master/docs/REFERENCE.md). `pg_stat_monitor` provides improved insights that allow database users to understand query origins, execution, planning statistics and details, query information, and metadata. This significantly improves observability, enabling users to debug and tune query performance. `pg_stat_monitor` is developed on the basis of `pg_stat_statements` as its more advanced replacement. @@ -38,7 +45,7 @@ To learn about other features, available in `pg_stat_monitor`, see the [Features `pg_stat_monitor` supports PostgreSQL versions 11 and above. It is compatible with both PostgreSQL provided by PostgreSQL Global Development Group (PGDG) and [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution). -The RPM (for RHEL and CentOS) and the DEB (for Debian and Ubuntu) packages are available from Percona repositories for PostgreSQL versions [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/). +The `RPM` (for RHEL and CentOS) and the `DEB` (for Debian and Ubuntu) packages are available from Percona repositories for PostgreSQL versions [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/). The RPM packages are also available in the official PostgreSQL (PGDG) yum repositories. 
@@ -48,8 +55,8 @@ The `pg_stat_monitor` should work on the latest version of both [Percona Distrib | **Distribution** | **Version** | **Provider** | | ---------------- | --------------- | ------------ | -|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/) and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/)| Percona| -| PostgreSQL | 11, 12, and 13 | PostgreSQL Global Development Group (PGDG) | +|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/) and [14](https://www.percona.com/downloads/postgresql-distribution-14/LATEST/)| Percona| +| PostgreSQL | 11, 12, 13 and 14 | PostgreSQL Global Development Group (PGDG) | ### Features @@ -68,10 +75,10 @@ The `pg_stat_monitor` should work on the latest version of both [Percona Distrib ### Documentation 1. [User guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md) -2. pg_stat_monitor vs pg_stat_statements -3. pg_stat_monitor view reference +2. [Comparing `pg_stat_monitor` and `pg_stat_statements`](https://github.com/percona/pg_stat_monitor/blob/master/docs/COMPARISON.md) +3. [pg_stat_monitor view reference](https://github.com/percona/pg_stat_monitor/blob/master/docs/REFERENCE.md) 4. [Release notes](https://github.com/percona/pg_stat_monitor/blob/master/docs/RELEASE_NOTES.md) -5. Contributing guide (https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) +5. [Contributing guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) ### Supported platforms @@ -84,7 +91,7 @@ The PostgreSQL YUM repository supports `pg_stat_monitor` for all [supported vers Find the list of supported platforms for `pg_stat_monitor` within [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) on the [Percona Release Lifecycle Overview](https://www.percona.com/services/policies/percona-software-support-lifecycle#pgsql) page. -### Installation Guidelines +### Installation guidelines You can install `pg_stat_monitor` from the following sources: @@ -117,7 +124,7 @@ Replace XX with the desired PostgreSQL version. For example, to install `pg_stat yum install percona-pg-stat-monitor13 ``` -#### Installing from PostgreSQL yum repositories +#### Installing from PostgreSQL `yum` repositories Install the PostgreSQL repositories following the instructions in the [Linux downloads (Red Hat family)](https://www.postgresql.org/download/linux/redhat/) chapter in PostgreSQL documentation. @@ -153,10 +160,8 @@ You can enable `pg_stat_monitor` when your `postgresql` instance is not running. Use the [ALTER SYSTEM](https://www.postgresql.org/docs/current/sql-altersystem.html)command from `psql` terminal to modify the `shared_preload_libraries` parameter. -``` +```sql ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_monitor'; - -ALTER SYSTEM ``` > **NOTE**: If you’ve added other modules to the `shared_preload_libraries` parameter (for example, `pg_stat_statements`), list all of them separated by commas for the `ALTER SYSTEM` command. 
@@ -169,23 +174,22 @@ Start or restart the `postgresql` instance to apply the changes. * On Debian and Ubuntu: -``` +```sh sudo systemctl restart postgresql.service ``` * On Red Hat Enterprise Linux and CentOS: -``` +```sh sudo systemctl restart postgresql-13 ``` Create the extension using the [CREATE EXTENSION](https://www.postgresql.org/docs/current/sql-createextension.html) command. Using this command requires the privileges of a superuser or a database owner. Connect to `psql` as a superuser for a database and run the following command: -``` +```sql CREATE EXTENSION pg_stat_monitor; -CREATE EXTENSION ``` @@ -235,16 +239,61 @@ make USE_PGXS=1 make USE_PGXS=1 install ``` +### Uninstall `pg_stat_monitor` + +To uninstall `pg_stat_monitor`, do the following: + +1. Disable statistics collection. From the `psql` terminal, run the following command: + + ```sql + ALTER SYSTEM SET pg_stat_monitor.pgsm_enable = 0; + ``` + +2. Drop `pg_stat_monitor` extension: + + ```sql + DROP EXTENSION pg_stat_monitor; + ``` + +3. Remove `pg_stat_monitor` from the `shared_preload_libraries` configuration parameter: + + ```sql + ALTER SYSTEM SET shared_preload_libraries = ''; + ``` + + **Important**: If the `shared_preload_libraries` parameter includes other modules, specify them all for the `ALTER SYSTEM SET` command to keep using them. + +4. Restart the `postgresql` instance to apply the changes. The following command restarts PostgreSQL 13. Replace the version value with the one you are using. + + * On Debian and Ubuntu: + + ```sh + sudo systemctl restart postgresql.service + ``` + + * On Red Hat Enterprise Linux and CentOS: + + + ```sh + sudo systemctl restart postgresql-13 + ``` + ### How to contribute We welcome and strongly encourage community participation and contributions, and are always looking for new members that are as dedicated to serving the community as we are. The [Contributing Guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) contains the guidelines on how you can contribute. +### Report a bug + +If you would like to suggest a new feature / an improvement or you found a bug in `pg_stat_monitor`, please submit the report to the [Percona Jira issue tracker](https://jira.percona.com/projects/PG). + +Refer to the [Submit a bug report or a feature request](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md#submit-a-bug-report-or-a-feature-request) section for bug reporting guidelines. + ### Support, discussions and forums -We welcome your feedback on your experience with `pg_stat_monitor`. Join our [technical forum](https://forums.percona.com/) or [Discord](https://discord.gg/mQEyGPkNbR) channel for help with `pg_stat_monitor` and Percona's open source software for MySQL®, [PostgreSQL](https://www.percona.com/software/postgresql-distribution), and MongoDB® databases. +We welcome your feedback on your experience with `pg_stat_monitor`. Join our [technical forum](https://forums.percona.com/c/postgresql/pg-stat-monitor/69) for help with `pg_stat_monitor`. 
### License diff --git a/percona-packaging/debian/changelog b/debian/changelog similarity index 100% rename from percona-packaging/debian/changelog rename to debian/changelog diff --git a/percona-packaging/debian/compat b/debian/compat similarity index 100% rename from percona-packaging/debian/compat rename to debian/compat diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..c4350a8 --- /dev/null +++ b/debian/control @@ -0,0 +1,24 @@ +Source: percona-pg-stat-monitor +Section: database +Priority: optional +Maintainer: Percona Development Team +Build-Depends: + debhelper (>= 9), + postgresql-server-dev-all (>= 153~) | percona-postgresql-server-dev-all (>= 153~), + +Package: percona-pg-stat-monitorPGVERSION +Architecture: any +Depends: + postgresql-PGVERSION, + ${misc:Depends}, + ${shlibs:Depends}, +Description: enhancement query planning and execution statistics collector + The pg_stat_monitor is a Query Performance Monitoring tool for PostgreSQL. + It attempts to provide a more holistic picture by providing much-needed query + performance insights in a single view. + . + pg_stat_monitor provides improved insights that allow database users to + understand query origins, execution, planning statistics and details, query + information, and metadata. This significantly improves observability, enabling + users to debug and tune query performance. pg_stat_monitor is developed on the + basis of pg_stat_statements as its more advanced replacement. diff --git a/debian/control.in b/debian/control.in new file mode 100644 index 0000000..c4350a8 --- /dev/null +++ b/debian/control.in @@ -0,0 +1,24 @@ +Source: percona-pg-stat-monitor +Section: database +Priority: optional +Maintainer: Percona Development Team +Build-Depends: + debhelper (>= 9), + postgresql-server-dev-all (>= 153~) | percona-postgresql-server-dev-all (>= 153~), + +Package: percona-pg-stat-monitorPGVERSION +Architecture: any +Depends: + postgresql-PGVERSION, + ${misc:Depends}, + ${shlibs:Depends}, +Description: enhancement query planning and execution statistics collector + The pg_stat_monitor is a Query Performance Monitoring tool for PostgreSQL. + It attempts to provide a more holistic picture by providing much-needed query + performance insights in a single view. + . + pg_stat_monitor provides improved insights that allow database users to + understand query origins, execution, planning statistics and details, query + information, and metadata. This significantly improves observability, enabling + users to debug and tune query performance. pg_stat_monitor is developed on the + basis of pg_stat_statements as its more advanced replacement. diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 0000000..dfbbbf6 --- /dev/null +++ b/debian/copyright @@ -0,0 +1,26 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: pg_stat_monitor +Source: https://github.com/percona/pg_stat_monitor + +Files: * +Copyright: Portions Copyright © 2018-2021, Percona LLC and/or its affiliates + Portions Copyright © 1996-2021, The PostgreSQL Global Development Group + Portions Copyright © 1994, The Regents of the University of California + +License: PostgreSQL + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose, without fee, and without a written agreement + is hereby granted, provided that the above copyright notice and this + paragraph and the following two paragraphs appear in all copies. + . 
+ IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR + DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING + LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS + DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + . + THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO + PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. diff --git a/percona-packaging/debian/pgversions b/debian/pgversions similarity index 100% rename from percona-packaging/debian/pgversions rename to debian/pgversions diff --git a/percona-packaging/debian/rules b/debian/rules similarity index 100% rename from percona-packaging/debian/rules rename to debian/rules diff --git a/percona-packaging/debian/source/format b/debian/source/format similarity index 100% rename from percona-packaging/debian/source/format rename to debian/source/format diff --git a/percona-packaging/debian/source/lintian-overrides b/debian/source/lintian-overrides similarity index 100% rename from percona-packaging/debian/source/lintian-overrides rename to debian/source/lintian-overrides diff --git a/hash_query.c b/hash_query.c index 29d0cc8..809c829 100644 --- a/hash_query.c +++ b/hash_query.c @@ -22,19 +22,8 @@ static pgssSharedState *pgss; static HTAB *pgss_hash; +static HTAB *pgss_query_hash; -static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size); -/* - * Copy query from src_buffer to dst_buff. - * Use query_id and query_pos to fast locate query in source buffer. - * Store updated query position in the destination buffer into param query_pos. - */ -static bool copy_query(uint64 bucket_id, - uint64 query_id, - uint64 query_pos, - unsigned char *dst_buf, - unsigned char *src_buf, - size_t *new_query_pos); static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size) @@ -50,7 +39,6 @@ void pgss_startup(void) { bool found = false; - int32 i; /* reset in case this is a restart within the postmaster */ @@ -75,16 +63,10 @@ pgss_startup(void) init_hook_stats(); #endif - pgss->query_buf_size_bucket = MAX_QUERY_BUF / PGSM_MAX_BUCKETS; - - for (i = 0; i < PGSM_MAX_BUCKETS; i++) - { - unsigned char *buf = (unsigned char *)ShmemAlloc(pgss->query_buf_size_bucket); - set_qbuf(i, buf); - memset(buf, 0, sizeof (uint64)); - } + set_qbuf((unsigned char *)ShmemAlloc(MAX_QUERY_BUF)); pgss_hash = hash_init("pg_stat_monitor: bucket hashtable", sizeof(pgssHashKey), sizeof(pgssEntry), MAX_BUCKET_ENTRIES); + pgss_query_hash = hash_init("pg_stat_monitor: queryID hashtable", sizeof(uint64), sizeof(pgssQueryEntry), MAX_BUCKET_ENTRIES); LWLockRelease(AddinShmemInitLock); @@ -107,6 +89,12 @@ pgsm_get_hash(void) return pgss_hash; } +HTAB* +pgsm_get_query_hash(void) +{ + return pgss_query_hash; +} + /* * shmem_shutdown hook: Dump statistics into file. 
* @@ -152,7 +140,9 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) } /* Find or create an entry with desired hash code */ entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER_NULL, &found); - if (!found) + if (entry == NULL) + pgsm_log_error("hash_entry_alloc: OUT OF MEMORY"); + else if (!found) { pgss->bucket_entry[pg_atomic_read_u64(&pgss->current_wbucket)]++; /* New entry, initialize it */ @@ -164,8 +154,7 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) /* ... and don't forget the query text metadata */ entry->encoding = encoding; } - if (entry == NULL) - elog(DEBUG1, "%s", "pg_stat_monitor: out of memory"); + return entry; } @@ -182,22 +171,15 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) * Caller must hold an exclusive lock on pgss->lock. */ void -hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]) +hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer) { HASH_SEQ_STATUS hash_seq; pgssEntry *entry = NULL; - pgssSharedState *pgss = pgsm_get_ss(); /* Store pending query ids from the previous bucket. */ List *pending_entries = NIL; ListCell *pending_entry; - if (new_bucket_id != -1) - { - /* Clear all queries in the query buffer for the new bucket. */ - memset(query_buffer[new_bucket_id], 0, pgss->query_buf_size_bucket); - } - /* Iterate over the hash table. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) @@ -210,6 +192,11 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu (entry->key.bucket_id == new_bucket_id && (entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR))) { + if (new_bucket_id == -1) { + /* pg_stat_monitor_reset(), remove entry from query hash table too. */ + hash_search(pgss_query_hash, &(entry->key.queryid), HASH_REMOVE, NULL); + } + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } @@ -229,9 +216,21 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu pgssEntry *bkp_entry = malloc(sizeof(pgssEntry)); if (!bkp_entry) { - /* No memory, remove pending query entry from the previous bucket. */ - elog(ERROR, "hash_entry_dealloc: out of memory"); - entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + pgsm_log_error("hash_entry_dealloc: out of memory"); + /* + * No memory, If the entry has calls > 1 then we change the state to finished, + * as the pending query will likely finish execution during the new bucket + * time window. The pending query will vanish in this case, can't list it + * until it completes. + * + * If there is only one call to the query and it's pending, remove the + * entry from the previous bucket and allow it to finish in the new bucket, + * in order to avoid the query living in the old bucket forever. + */ + if (entry->counters.calls.calls > 1) + entry->counters.state = PGSS_FINISHED; + else + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); continue; } @@ -244,8 +243,20 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu /* Add the entry to a list of nodes to be processed later. */ pending_entries = lappend(pending_entries, bkp_entry); - /* Finally remove the pending query from the expired bucket id. 
*/ - entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + /* + * If the entry has calls > 1 then we change the state to finished in + * the previous bucket, as the pending query will likely finish execution + * during the new bucket time window. Can't remove it from the previous bucket + * as it may have many calls and we would lose the query statistics. + * + * If there is only one call to the query and it's pending, remove the entry + * from the previous bucket and allow it to finish in the new bucket, + * in order to avoid the query living in the old bucket forever. + */ + if (entry->counters.calls.calls > 1) + entry->counters.state = PGSS_FINISHED; + else + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } } } @@ -268,13 +279,7 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu new_entry->counters = old_entry->counters; SpinLockInit(&new_entry->mutex); new_entry->encoding = old_entry->encoding; - /* copy query's text from previous bucket to the new one. */ - copy_query(new_bucket_id, - new_entry->key.queryid, /* query id */ - old_entry->query_pos, /* query position in buffer */ - query_buffer[new_bucket_id], /* destination query buffer */ - query_buffer[old_bucket_id], /* source query buffer */ - &new_entry->query_pos); /* position in which query was inserted into destination buffer */ + new_entry->query_pos = old_entry->query_pos; } free(old_entry); @@ -310,39 +315,3 @@ IsHashInitialize(void) return (pgss != NULL && pgss_hash != NULL); } - -static bool copy_query(uint64 bucket_id, - uint64 query_id, - uint64 query_pos, - unsigned char *dst_buf, - unsigned char *src_buf, - size_t *new_query_pos) -{ - uint64 query_len = 0; - uint64 buf_len = 0; - - memcpy(&buf_len, src_buf, sizeof (uint64)); - if (buf_len <= 0) - return false; - - /* Try to locate the query directly. */ - if (query_pos != 0 && (query_pos + sizeof(uint64) + sizeof(uint64)) < buf_len) - { - if (*(uint64 *)&src_buf[query_pos] != query_id) - return false; - - query_pos += sizeof(uint64); - - memcpy(&query_len, &src_buf[query_pos], sizeof(uint64)); /* query len */ - query_pos += sizeof(uint64); - - if (query_pos + query_len > buf_len) /* avoid reading past buffer's length. */ - return false; - - return SaveQueryText(bucket_id, query_id, dst_buf, - (const char *)&src_buf[query_pos], - query_len, new_query_pos); - } - - return false; -} diff --git a/percona-packaging/debian/control b/percona-packaging/debian/control deleted file mode 100644 index 662d898..0000000 --- a/percona-packaging/debian/control +++ /dev/null @@ -1,20 +0,0 @@ -Source: percona-pg-stat-monitor -Section: database -Priority: optional -Maintainer: Percona Development Team -Build-Depends: - debhelper (>= 9), - percona-postgresql-server-dev-all (>= 153~), - -Package: percona-pg-stat-monitor@@PG_REL@@ -Architecture: any -Depends: - percona-postgresql-@@PG_REL@@ | postgresql-@@PG_REL@@, - ${misc:Depends}, - ${shlibs:Depends}, -Description: The pg_stat_monitor is statistics collector tool - based on PostgreSQL's contrib module "pg_stat_statements". - . - pg_stat_monitor is developed on the basis of pg_stat_statments - as more advanced replacement for pg_stat_statment. - It provides all the features of pg_stat_statment plus its own feature set. 
diff --git a/percona-packaging/debian/control.in b/percona-packaging/debian/control.in deleted file mode 100644 index 662d898..0000000 --- a/percona-packaging/debian/control.in +++ /dev/null @@ -1,20 +0,0 @@ -Source: percona-pg-stat-monitor -Section: database -Priority: optional -Maintainer: Percona Development Team -Build-Depends: - debhelper (>= 9), - percona-postgresql-server-dev-all (>= 153~), - -Package: percona-pg-stat-monitor@@PG_REL@@ -Architecture: any -Depends: - percona-postgresql-@@PG_REL@@ | postgresql-@@PG_REL@@, - ${misc:Depends}, - ${shlibs:Depends}, -Description: The pg_stat_monitor is statistics collector tool - based on PostgreSQL's contrib module "pg_stat_statements". - . - pg_stat_monitor is developed on the basis of pg_stat_statments - as more advanced replacement for pg_stat_statment. - It provides all the features of pg_stat_statment plus its own feature set. diff --git a/percona-packaging/debian/copyright b/percona-packaging/debian/copyright deleted file mode 100644 index 621a1ee..0000000 --- a/percona-packaging/debian/copyright +++ /dev/null @@ -1,27 +0,0 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: PostgreSQL -Source: https://github.com/percona/pg_stat_monitor - -pg_stat_monitor - Statistics collector for PostgreSQL. - -Portions Copyright © 2018-2020, Percona LLC and/or its affiliates - -Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group - -Portions Copyright (c) 1994, The Regents of the University of California - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose, without fee, and without a written agreement is -hereby granted, provided that the above copyright notice and this paragraph and -the following two paragraphs appear in all copies. - -IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR DIRECT, -INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, -ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE -COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND -THE COPYRIGHT HOLDER HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, -UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
diff --git a/percona-packaging/scripts/pg_stat_monitor_builder.sh b/percona-packaging/scripts/pg_stat_monitor_builder.sh index 52b2ccb..92986dc 100644 --- a/percona-packaging/scripts/pg_stat_monitor_builder.sh +++ b/percona-packaging/scripts/pg_stat_monitor_builder.sh @@ -128,9 +128,6 @@ get_sources(){ fi REVISION=$(git rev-parse --short HEAD) echo "REVISION=${REVISION}" >> ${WORKDIR}/pg-stat-monitor.properties - rm -rf rpm debian - cp -r percona-packaging/rpm ./ - cp -r percona-packaging/debian ./ EDITFILES="debian/control debian/control.in debian/rules rpm/pg-stat-monitor.spec" for file in $EDITFILES; do @@ -193,9 +190,10 @@ install_deps() { percona-release enable ppg-12 release fi yum -y install git wget + PKGLIST="percona-postgresql-common percona-postgresql${PG_RELEASE}-devel" PKGLIST+=" clang-devel git clang llvm-devel rpmdevtools vim wget" PKGLIST+=" perl binutils gcc gcc-c++" - PKGLIST+=" percona-postgresql-common clang-devel llvm-devel percona-postgresql${PG_RELEASE}-devel git rpm-build rpmdevtools wget gcc make autoconf" + PKGLIST+=" clang-devel llvm-devel git rpm-build rpmdevtools wget gcc make autoconf" if [[ "${RHEL}" -eq 8 ]]; then dnf -y module disable postgresql elif [[ "${RHEL}" -eq 7 ]]; then @@ -225,6 +223,15 @@ install_deps() { elif [[ "${PG_RELEASE}" == "12" ]]; then percona-release enable ppg-12 release fi + + + PKGLIST="percona-postgresql-${PG_RELEASE} percona-postgresql-common percona-postgresql-server-dev-all" + + # ---- using a community version of postgresql + #wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + #echo "deb http://apt.postgresql.org/pub/repos/apt/ ${PG_RELEASE}"-pgdg main | sudo tee /etc/apt/sources.list.d/pgdg.list + #PKGLIST="postgresql-${PG_RELEASE} postgresql-common postgresql-server-dev-all" + apt-get update if [[ "${OS_NAME}" != "focal" ]]; then @@ -237,8 +244,9 @@ install_deps() { fi fi - PKGLIST+=" percona-postgresql-${PG_RELEASE} debconf debhelper clang-7 devscripts dh-exec dh-systemd git wget libkrb5-dev libssl-dev percona-postgresql-common percona-postgresql-server-dev-all" - PKGLIST+=" build-essential debconf debhelper devscripts dh-exec dh-systemd git wget libxml-checker-perl libxml-libxml-perl libio-socket-ssl-perl libperl-dev libssl-dev libxml2-dev txt2man zlib1g-dev libpq-dev" + PKGLIST+=" debconf debhelper clang-7 devscripts dh-exec dh-systemd git wget libkrb5-dev libssl-dev" + PKGLIST+=" build-essential debconf debhelper devscripts dh-exec dh-systemd git wget libxml-checker-perl" + PKGLIST+=" libxml-libxml-perl libio-socket-ssl-perl libperl-dev libssl-dev libxml2-dev txt2man zlib1g-dev libpq-dev" until DEBIAN_FRONTEND=noninteractive apt-get -y install ${PKGLIST}; do sleep 1 @@ -402,6 +410,7 @@ build_source_deb(){ cd ${BUILDDIR} dch -D unstable --force-distribution -v "${VERSION}-${DEB_RELEASE}" "Update to new percona-pg-stat-monitor${PG_RELEASE} version ${VERSION}" + pg_buildext updatecontrol dpkg-buildpackage -S cd ../ mkdir -p $WORKDIR/source_deb @@ -444,6 +453,7 @@ build_deb(){ dpkg-source -x ${DSC} # cd percona-pg-stat-monitor-${VERSION} + sed -i "s:\. 
:${WORKDIR}/percona-pg-stat-monitor-${VERSION} :g" debian/rules dch -m -D "${OS_NAME}" --force-distribution -v "1:${VERSION}-${DEB_RELEASE}.${OS_NAME}" 'Update distribution' unset $(locale|cut -d= -f1) dpkg-buildpackage -rfakeroot -us -uc -b diff --git a/pg_stat_monitor--1.0.13.sql.in b/pg_stat_monitor--1.0.13.sql.in index 990e62b..864b617 100644 --- a/pg_stat_monitor--1.0.13.sql.in +++ b/pg_stat_monitor--1.0.13.sql.in @@ -119,12 +119,13 @@ LANGUAGE SQL PARALLEL SAFE; CREATE FUNCTION pg_stat_monitor_settings( OUT name text, - OUT value INTEGER, - OUT default_value INTEGER, + OUT value text, + OUT default_value text, OUT description text, OUT minimum INTEGER, OUT maximum INTEGER, - OUT restart INTEGER + OUT options text, + OUT restart text ) RETURNS SETOF record AS 'MODULE_PATHNAME', 'pg_stat_monitor_settings' @@ -137,6 +138,7 @@ CREATE VIEW pg_stat_monitor_settings AS SELECT description, minimum, maximum, + options, restart FROM pg_stat_monitor_settings(); @@ -255,9 +257,42 @@ $$ language plpgsql; -- total_time / greatest(ncalls, 1) as avg_time, -- ncalls, -- ROUND(CAST(total_time / greatest(sum(total_time) OVER(), 0.00000001) * 100 as numeric), 2)::text || '%' as load_comparison ---FROM pg_stat_monitor_hook_stats(); +-- FROM pg_stat_monitor_hook_stats(); + +CREATE FUNCTION pg_stat_monitor_errors( + OUT severity int, + OUT message text, + OUT msgtime text, + OUT calls int8 +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_monitor_errors' +LANGUAGE C STRICT VOLATILE PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION pgsm_log_severity_as_text(severity int) RETURNS TEXT AS +$$ +SELECT + CASE + WHEN severity = 0 THEN 'INFO' + WHEN severity = 1 THEN 'WARNING' + WHEN severity = 2 THEN 'ERROR' + END +$$ +LANGUAGE SQL PARALLEL SAFE; + +CREATE VIEW pg_stat_monitor_errors AS SELECT + pgsm_log_severity_as_text(severity) as severity, message, msgtime, calls +FROM pg_stat_monitor_errors(); + +CREATE FUNCTION pg_stat_monitor_reset_errors() +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C PARALLEL SAFE; GRANT SELECT ON pg_stat_monitor TO PUBLIC; GRANT SELECT ON pg_stat_monitor_settings TO PUBLIC; + +GRANT SELECT ON pg_stat_monitor_errors TO PUBLIC; -- Don't want this to be available to non-superusers. 
REVOKE ALL ON FUNCTION pg_stat_monitor_reset() FROM PUBLIC; +REVOKE ALL ON FUNCTION pg_stat_monitor_reset_errors() FROM PUBLIC; diff --git a/pg_stat_monitor--1.0.14.sql.in b/pg_stat_monitor--1.0.14.sql.in index dae64b8..08f13b9 100644 --- a/pg_stat_monitor--1.0.14.sql.in +++ b/pg_stat_monitor--1.0.14.sql.in @@ -119,12 +119,13 @@ LANGUAGE SQL PARALLEL SAFE; CREATE FUNCTION pg_stat_monitor_settings( OUT name text, - OUT value INTEGER, - OUT default_value INTEGER, + OUT value text, + OUT default_value text, OUT description text, OUT minimum INTEGER, OUT maximum INTEGER, - OUT restart INTEGER + OUT options text, + OUT restart text ) RETURNS SETOF record AS 'MODULE_PATHNAME', 'pg_stat_monitor_settings' @@ -137,6 +138,7 @@ CREATE VIEW pg_stat_monitor_settings AS SELECT description, minimum, maximum, + options, restart FROM pg_stat_monitor_settings(); @@ -256,9 +258,42 @@ $$ language plpgsql; -- total_time / greatest(ncalls, 1) as avg_time, -- ncalls, -- ROUND(CAST(total_time / greatest(sum(total_time) OVER(), 0.00000001) * 100 as numeric), 2)::text || '%' as load_comparison ---FROM pg_stat_monitor_hook_stats(); +-- FROM pg_stat_monitor_hook_stats(); + +CREATE FUNCTION pg_stat_monitor_errors( + OUT severity int, + OUT message text, + OUT msgtime text, + OUT calls int8 +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_monitor_errors' +LANGUAGE C STRICT VOLATILE PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION pgsm_log_severity_as_text(severity int) RETURNS TEXT AS +$$ +SELECT + CASE + WHEN severity = 0 THEN 'INFO' + WHEN severity = 1 THEN 'WARNING' + WHEN severity = 2 THEN 'ERROR' + END +$$ +LANGUAGE SQL PARALLEL SAFE; + +CREATE VIEW pg_stat_monitor_errors AS SELECT + pgsm_log_severity_as_text(severity) as severity, message, msgtime, calls +FROM pg_stat_monitor_errors(); + +CREATE FUNCTION pg_stat_monitor_reset_errors() +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C PARALLEL SAFE; GRANT SELECT ON pg_stat_monitor TO PUBLIC; GRANT SELECT ON pg_stat_monitor_settings TO PUBLIC; + +GRANT SELECT ON pg_stat_monitor_errors TO PUBLIC; -- Don't want this to be available to non-superusers. REVOKE ALL ON FUNCTION pg_stat_monitor_reset() FROM PUBLIC; +REVOKE ALL ON FUNCTION pg_stat_monitor_reset_errors() FROM PUBLIC; diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 4042de1..52780ab 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -17,6 +17,7 @@ #include "postgres.h" #include "access/parallel.h" +#include "utils/guc.h" #include #ifdef BENCHMARK #include /* clock() */ @@ -67,7 +68,8 @@ static int num_relations; /* Number of relation in the query */ static bool system_init = false; static struct rusage rusage_start; static struct rusage rusage_end; -static unsigned char *pgss_qbuf[MAX_BUCKETS]; +/* Query buffer, store queries' text. 
*/ +static unsigned char *pgss_qbuf = NULL; static char *pgss_explain(QueryDesc *queryDesc); #ifdef BENCHMARK static struct pg_hook_stats_t *pg_hook_stats; @@ -103,12 +105,11 @@ PG_FUNCTION_INFO_V1(pg_stat_monitor_settings); PG_FUNCTION_INFO_V1(get_histogram_timings); PG_FUNCTION_INFO_V1(pg_stat_monitor_hook_stats); -static uint pg_get_client_addr(void); -static int pg_get_application_name(char* application_name); +static uint pg_get_client_addr(bool *ok); +static int pg_get_application_name(char *application_name, bool *ok); static PgBackendStatus *pg_get_backend_status(void); static Datum intarray_get_datum(int32 arr[], int len); - #if PG_VERSION_NUM < 140000 DECLARE_HOOK(void pgss_post_parse_analyze, ParseState *pstate, Query *query); #else @@ -136,6 +137,7 @@ DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryStri ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *qc); +static uint64 pgss_hash_string(const char *str, int len); #else static void BufferUsageAccumDiff(BufferUsage* bufusage, BufferUsage* pgBufferUsage, BufferUsage* bufusage_start); DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, @@ -144,30 +146,28 @@ DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryStri DestReceiver *dest, char *completionTag); #endif - -static uint64 pgss_hash_string(const char *str, int len); char *unpack_sql_state(int sql_state); -static void pgss_store_error(uint64 queryid, const char * query, ErrorData *edata); +#define PGSM_HANDLED_UTILITY(n) (!IsA(n, ExecuteStmt) && \ + !IsA(n, PrepareStmt) && \ + !IsA(n, DeallocateStmt)) -static void pgss_store_utility(const char *query, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage); +static void pgss_store_error(uint64 queryid, const char *query, ErrorData *edata); static void pgss_store(uint64 queryid, - const char *query, - PlanInfo *plan_info, - CmdType cmd_type, - SysInfo *sys_info, - ErrorInfo *error_info, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage, - JumbleState *jstate, - pgssStoreKind kind); + const char *query, + int query_location, + int query_len, + PlanInfo *plan_info, + CmdType cmd_type, + SysInfo *sys_info, + ErrorInfo *error_info, + double total_time, + uint64 rows, + BufferUsage *bufusage, + WalUsage *walusage, + JumbleState *jstate, + pgssStoreKind kind); static void pg_stat_monitor_internal(FunctionCallInfo fcinfo, bool showtext); @@ -179,6 +179,12 @@ static void JumbleQuery(JumbleState *jstate, Query *query); static void JumbleRangeTable(JumbleState *jstate, List *rtable); static void JumbleExpr(JumbleState *jstate, Node *node); static void RecordConstLocation(JumbleState *jstate, int location); +/* + * Given a possibly multi-statement source string, confine our attention to the + * relevant part of the string. 
+ */ +static const char * +CleanQuerytext(const char *query, int *location, int *len); #endif static char *generate_normalized_query(JumbleState *jstate, const char *query, @@ -188,25 +194,14 @@ static int comp_location(const void *a, const void *b); static uint64 get_next_wbucket(pgssSharedState *pgss); -static void -pgss_store_query(uint64 queryid, - const char * query, - CmdType cmd_type, - int query_location, - int query_len, -#if PG_VERSION_NUM > 130000 - JumbleState *jstate, -#else - JumbleState *jstate, -#endif - pgssStoreKind kind); - #if PG_VERSION_NUM < 140000 static uint64 get_query_id(JumbleState *jstate, Query *query); #endif /* Daniel J. Bernstein's hash algorithm: see http://www.cse.yorku.ca/~oz/hash.html */ static uint64 djb2_hash(unsigned char *str, size_t len); +/* Same as above, but stores the calculated string length into *out_len (small optimization) */ +static uint64 djb2_hash_str(unsigned char *str, int *out_len); /* * Module load callback */ @@ -214,7 +209,8 @@ static uint64 djb2_hash(unsigned char *str, size_t len); void _PG_init(void) { - int i, rc; + int rc; + char file_name[1024]; elog(DEBUG2, "pg_stat_monitor: %s()", __FUNCTION__); /* @@ -239,12 +235,8 @@ _PG_init(void) EnableQueryId(); #endif - for (i = 0; i < PGSM_MAX_BUCKETS; i++) - { - char file_name[1024]; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, i); - unlink(file_name); - } + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); + unlink(file_name); EmitWarningsOnPlaceholders("pg_stat_monitor"); @@ -360,8 +352,6 @@ pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query, JumbleState static void pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) { - pgssStoreKind kind = PGSS_PARSE; - if (prev_post_parse_analyze_hook) prev_post_parse_analyze_hook(pstate, query, jstate); @@ -379,7 +369,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) */ if (query->utilityStmt) { - query->queryId = UINT64CONST(0); + if (PGSM_TRACK_UTILITY && !PGSM_HANDLED_UTILITY(query->utilityStmt)) + query->queryId = UINT64CONST(0); return; } @@ -390,15 +381,21 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) * constants, the normalized string would be the same as the query text * anyway, so there's no need for an early entry. 
*/ - if (jstate == NULL || jstate->clocations_count <= 0) - return; - pgss_store_query(query->queryId, /* queryid */ - pstate->p_sourcetext, /* query */ - query->commandType, /* CmdType */ - query->stmt_location, /* Query Location */ - query->stmt_len, /* Query Len */ - jstate, /* JumbleState */ - kind); /*pgssStoreKind */ + if (jstate && jstate->clocations_count > 0) + pgss_store(query->queryId, /* query id */ + pstate->p_sourcetext, /* query */ + query->stmt_location, /* query location */ + query->stmt_len, /* query length */ + NULL, /* PlanInfo */ + query->commandType, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + 0, /* totaltime */ + 0, /* rows */ + NULL, /* bufusage */ + NULL, /* walusage */ + jstate, /* JumbleState */ + PGSS_PARSE); /* pgssStoreKind */ } #else @@ -419,7 +416,6 @@ static void pgss_post_parse_analyze(ParseState *pstate, Query *query) { JumbleState jstate; - pgssStoreKind kind = PGSS_PARSE; if (prev_post_parse_analyze_hook) prev_post_parse_analyze_hook(pstate, query); @@ -454,16 +450,21 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) if (query->queryId == UINT64CONST(0)) query->queryId = UINT64CONST(1); - if (jstate.clocations_count <= 0) - return; - - pgss_store_query(query->queryId, /* queryid */ - pstate->p_sourcetext, /* query */ - query->commandType, /* CmdType */ - query->stmt_location, /* Query Location */ - query->stmt_len, /* Query Len */ - &jstate, /* JumbleState */ - kind); /*pgssStoreKind */ + if (jstate.clocations_count > 0) + pgss_store(query->queryId, /* query id */ + pstate->p_sourcetext, /* query */ + query->stmt_location, /* query location */ + query->stmt_len, /* query length */ + NULL, /* PlanInfo */ + query->commandType, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + 0, /* totaltime */ + 0, /* rows */ + NULL, /* bufusage */ + NULL, /* walusage */ + &jstate, /* JumbleState */ + PGSS_PARSE); /* pgssStoreKind */ } #endif @@ -483,10 +484,8 @@ pgss_ExecutorStart_benchmark(QueryDesc *queryDesc, int eflags) static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) { - uint64 queryId = queryDesc->plannedstmt->queryId; - - if(getrusage(RUSAGE_SELF, &rusage_start) != 0) - elog(DEBUG1, "pg_stat_monitor: failed to execute getrusage"); + if (getrusage(RUSAGE_SELF, &rusage_start) != 0) + pgsm_log_error("pgss_ExecutorStart: failed to execute getrusage"); if (prev_ExecutorStart) prev_ExecutorStart(queryDesc, eflags); @@ -520,22 +519,20 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) #endif MemoryContextSwitchTo(oldcxt); } - pgss_store(queryId, /* query id */ + pgss_store(queryDesc->plannedstmt->queryId, /* query id */ queryDesc->sourceText, /* query text */ + queryDesc->plannedstmt->stmt_location, /* query location */ + queryDesc->plannedstmt->stmt_len, /* query length */ NULL, /* PlanInfo */ queryDesc->operation, /* CmdType */ NULL, /* SysInfo */ NULL, /* ErrorInfo */ 0, /* totaltime */ 0, /* rows */ - NULL, /* bufusage */ -#if PG_VERSION_NUM >= 130000 + NULL, /* bufusage */ NULL, /* walusage */ -#else - NULL, -#endif - NULL, - PGSS_EXEC); /* pgssStoreKind */ + NULL, /* JumbleState */ + PGSS_EXEC); /* pgssStoreKind */ } } @@ -652,17 +649,18 @@ pgss_ExecutorEnd_benchmark(QueryDesc *queryDesc) static void pgss_ExecutorEnd(QueryDesc *queryDesc) { - uint64 queryId = queryDesc->plannedstmt->queryId; - SysInfo sys_info; - PlanInfo plan_info; + uint64 queryId = queryDesc->plannedstmt->queryId; + SysInfo sys_info; + PlanInfo plan_info; + PlanInfo *plan_ptr = NULL; - /* Extract the plan information in case of 
SELECT statment */ - memset(&plan_info, 0, sizeof(PlanInfo)); + /* Extract the plan information in case of SELECT statement */ if (queryDesc->operation == CMD_SELECT && PGSM_QUERY_PLAN) { MemoryContext mct = MemoryContextSwitchTo(TopMemoryContext); - snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc)); - plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char*)plan_info.plan_text, strlen(plan_info.plan_text), 0)); + plan_info.plan_len = snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc)); + plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char *)plan_info.plan_text, plan_info.plan_len, 0)); + plan_ptr = &plan_info; MemoryContextSwitchTo(mct); } @@ -679,15 +677,17 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) sys_info.utime = time_diff(rusage_end.ru_utime, rusage_start.ru_utime); sys_info.stime = time_diff(rusage_end.ru_stime, rusage_start.ru_stime); - pgss_store(queryId, /* query id */ - queryDesc->sourceText, /* query text */ - &plan_info, /* PlanInfo */ - queryDesc->operation, /* CmdType */ - &sys_info, /* SysInfo */ - NULL, /* ErrorInfo */ - queryDesc->totaltime->total * 1000.0, /* totaltime */ - queryDesc->estate->es_processed, /* rows */ - &queryDesc->totaltime->bufusage, /* bufusage */ + pgss_store(queryId, /* query id */ + queryDesc->sourceText, /* query text */ + queryDesc->plannedstmt->stmt_location, /* query location */ + queryDesc->plannedstmt->stmt_len, /* query length */ + plan_ptr, /* PlanInfo */ + queryDesc->operation, /* CmdType */ + &sys_info, /* SysInfo */ + NULL, /* ErrorInfo */ + queryDesc->totaltime->total * 1000.0, /* totaltime */ + queryDesc->estate->es_processed, /* rows */ + &queryDesc->totaltime->bufusage, /* bufusage */ #if PG_VERSION_NUM >= 130000 &queryDesc->totaltime->walusage, /* walusage */ #else @@ -783,7 +783,6 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par if (PGSM_TRACK_PLANNING && query_string && parse->queryId != UINT64CONST(0) && !IsParallelWorker()) { - PlanInfo plan_info; instr_time start; instr_time duration; BufferUsage bufusage_start; @@ -791,7 +790,6 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par WalUsage walusage_start; WalUsage walusage; - memset(&plan_info, 0, sizeof(PlanInfo)); /* We need to track buffer usage as the planner can access them. */ bufusage_start = pgBufferUsage; @@ -833,18 +831,20 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par /* calc differences of WAL counters. 
*/ memset(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start); - pgss_store(parse->queryId, /* query id */ - query_string, /* query */ - &plan_info, /* PlanInfo */ - parse->commandType, /* CmdType */ - NULL, /* SysInfo */ - NULL, /* ErrorInfo */ - INSTR_TIME_GET_MILLISEC(duration), /* totaltime */ - 0, /* rows */ - &bufusage, /* bufusage */ - &walusage, /* walusage */ - NULL, /* JumbleState */ - PGSS_PLAN); /* pgssStoreKind */ + pgss_store(parse->queryId, /* query id */ + query_string, /* query */ + parse->stmt_location, /* query location */ + parse->stmt_len, /* query length */ + NULL, /* PlanInfo */ + parse->commandType, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + INSTR_TIME_GET_MILLISEC(duration), /* totaltime */ + 0, /* rows */ + &bufusage, /* bufusage */ + &walusage, /* walusage */ + NULL, /* JumbleState */ + PGSS_PLAN); /* pgssStoreKind */ } else { @@ -934,7 +934,24 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, char *completionTag) #endif { - Node *parsetree = pstmt->utilityStmt; + Node *parsetree = pstmt->utilityStmt; + uint64 queryId = 0; + +#if PG_VERSION_NUM >= 140000 + queryId = pstmt->queryId; + + /* + * Force utility statements to get queryId zero. We do this even in cases + * where the statement contains an optimizable statement for which a + * queryId could be derived (such as EXPLAIN or DECLARE CURSOR). For such + * cases, runtime control will first go through ProcessUtility and then + * the executor, and we don't want the executor hooks to do anything, + * since we are already measuring the statement's costs at the utility + * level. + */ + if (PGSM_TRACK_UTILITY && !IsParallelWorker()) + pstmt->queryId = UINT64CONST(0); +#endif /* * If it's an EXECUTE statement, we don't track it and don't increment the @@ -950,10 +967,8 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, * * Likewise, we don't track execution of DEALLOCATE. */ - if (PGSM_TRACK_UTILITY && - !IsA(parsetree, ExecuteStmt) && - !IsA(parsetree, PrepareStmt) && - !IsA(parsetree, DeallocateStmt) && !IsParallelWorker()) + if (PGSM_TRACK_UTILITY && PGSM_HANDLED_UTILITY(parsetree) && + !IsParallelWorker()) { instr_time start; instr_time duration; @@ -1016,7 +1031,16 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, INSTR_TIME_SUBTRACT(duration, start); #if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 + rows = (qc && (qc->commandTag == CMDTAG_COPY || + qc->commandTag == CMDTAG_FETCH || + qc->commandTag == CMDTAG_SELECT || + qc->commandTag == CMDTAG_REFRESH_MATERIALIZED_VIEW)) + ? qc->nprocessed + : 0; +#else rows = (qc && qc->commandTag == CMDTAG_COPY) ? qc->nprocessed : 0; +#endif /* calc differences of WAL counters. */ memset(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start); @@ -1031,49 +1055,59 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, /* calc differences of buffer counters. 
*/ memset(&bufusage, 0, sizeof(BufferUsage)); BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); - pgss_store_utility(queryString, /* query text */ - INSTR_TIME_GET_MILLISEC(duration), /* totaltime */ - rows, /* rows */ - &bufusage, /* bufusage */ - &walusage); /* walusage */ + pgss_store( + queryId, /* query ID */ + queryString, /* query text */ + pstmt->stmt_location, /* query location */ + pstmt->stmt_len, /* query length */ + NULL, /* PlanInfo */ + 0, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + INSTR_TIME_GET_MILLISEC(duration), /* total_time */ + rows, /* rows */ + &bufusage, /* bufusage */ + &walusage, /* walusage */ + NULL, /* JumbleState */ + PGSS_FINISHED); /* pgssStoreKind */ } else { #if PG_VERSION_NUM >= 140000 - if (prev_ProcessUtility) - prev_ProcessUtility(pstmt, queryString, - readOnlyTree, - context, params, queryEnv, + if (prev_ProcessUtility) + prev_ProcessUtility(pstmt, queryString, + readOnlyTree, + context, params, queryEnv, + dest, + qc); + else + standard_ProcessUtility(pstmt, queryString, + readOnlyTree, + context, params, queryEnv, dest, qc); - else - standard_ProcessUtility(pstmt, queryString, - readOnlyTree, - context, params, queryEnv, - dest, - qc); #elif PG_VERSION_NUM >= 130000 - if (prev_ProcessUtility) - prev_ProcessUtility(pstmt, queryString, - context, params, queryEnv, + if (prev_ProcessUtility) + prev_ProcessUtility(pstmt, queryString, + context, params, queryEnv, + dest, + qc); + else + standard_ProcessUtility(pstmt, queryString, + context, params, queryEnv, dest, qc); - else - standard_ProcessUtility(pstmt, queryString, - context, params, queryEnv, - dest, - qc); #else - if (prev_ProcessUtility) - prev_ProcessUtility(pstmt, queryString, + if (prev_ProcessUtility) + prev_ProcessUtility(pstmt, queryString, + context, params, queryEnv, + dest, + completionTag); + else + standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); - else - standard_ProcessUtility(pstmt, queryString, - context, params, queryEnv, - dest, - completionTag); #endif } } @@ -1099,6 +1133,8 @@ BufferUsageAccumDiff(BufferUsage* bufusage, BufferUsage* pgBufferUsage, BufferUs INSTR_TIME_SUBTRACT(bufusage->blk_write_time, bufusage_start->blk_write_time); } #endif + +#if PG_VERSION_NUM < 140000 /* * Given an arbitrarily long query string, produce a hash for the purposes of * identifying the query, without normalizing constants. 
Used when hashing @@ -1110,19 +1146,23 @@ pgss_hash_string(const char *str, int len) return DatumGetUInt64(hash_any_extended((const unsigned char *) str, len, 0)); } +#endif static PgBackendStatus* pg_get_backend_status(void) { LocalPgBackendStatus *local_beentry; - int num_backends = pgstat_fetch_stat_numbackends(); - int i; + int num_backends = pgstat_fetch_stat_numbackends(); + int i; for (i = 1; i <= num_backends; i++) { PgBackendStatus *beentry; local_beentry = pgstat_fetch_stat_local_beentry(i); + if (!local_beentry) + continue; + beentry = &local_beentry->backendStatus; if (beentry->st_procpid == MyProcPid) @@ -1132,32 +1172,32 @@ pg_get_backend_status(void) } static int -pg_get_application_name(char *application_name) +pg_get_application_name(char *application_name, bool *ok) { PgBackendStatus *beentry = pg_get_backend_status(); - snprintf(application_name, APPLICATIONNAME_LEN, "%s", beentry->st_appname); - return strlen(application_name); + if (!beentry) + return snprintf(application_name, APPLICATIONNAME_LEN, "%s", "unknown"); + + *ok = true; + + return snprintf(application_name, APPLICATIONNAME_LEN, "%s", beentry->st_appname); } -/* - * Store some statistics for a statement. - * - * If queryId is 0 then this is a utility statement and we should compute - * a suitable queryId internally. - * - * If jstate is not NULL then we're trying to create an entry for which - * we have no statistics as yet; we just want to record the normalized - */ - static uint -pg_get_client_addr(void) +pg_get_client_addr(bool *ok) { PgBackendStatus *beentry = pg_get_backend_status(); - char remote_host[NI_MAXHOST]; - int ret; + char remote_host[NI_MAXHOST]; + int ret; + + remote_host[0] = '\0'; + + if (!beentry) + return ntohl(inet_addr("127.0.0.1")); + + *ok = true; - memset(remote_host, 0x0, NI_MAXHOST); ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr, beentry->st_clientaddr.salen, remote_host, sizeof(remote_host), @@ -1168,6 +1208,7 @@ pg_get_client_addr(void) if (strcmp(remote_host, "[local]") == 0) return ntohl(inet_addr("127.0.0.1")); + return ntohl(inet_addr(remote_host)); } @@ -1186,16 +1227,16 @@ pgss_update_entry(pgssEntry *entry, BufferUsage *bufusage, WalUsage *walusage, bool reset, - pgssStoreKind kind) + pgssStoreKind kind, + const char *app_name, + size_t app_name_len) { int index; - char application_name[APPLICATIONNAME_LEN]; - int application_name_len = pg_get_application_name(application_name); double old_mean; int message_len = error_info ? strlen (error_info->message) : 0; int comments_len = comments ? strlen (comments) : 0; int sqlcode_len = error_info ? strlen (error_info->sqlcode) : 0; - int plan_text_len = plan_info ? strlen (plan_info->plan_text) : 0; + int plan_text_len = plan_info ? 
plan_info->plan_len : 0; /* volatile block */ @@ -1259,11 +1300,11 @@ pgss_update_entry(pgssEntry *entry, e->counters.resp_calls[index]++; } - if (plan_text_len > 0) + if (plan_text_len > 0 && !e->counters.planinfo.plan_text[0]) _snprintf(e->counters.planinfo.plan_text, plan_info->plan_text, plan_text_len + 1, PLAN_TEXT_LEN); - if (application_name_len > 0) - _snprintf(e->counters.info.application_name, application_name, application_name_len + 1, APPLICATIONNAME_LEN); + if (app_name_len > 0 && !e->counters.info.application_name[0]) + _snprintf(e->counters.info.application_name, app_name, app_name_len + 1, APPLICATIONNAME_LEN); e->counters.info.num_relations = num_relations; _snprintf2(e->counters.info.relations, relations, num_relations, REL_LEN); @@ -1303,7 +1344,6 @@ pgss_update_entry(pgssEntry *entry, e->counters.blocks.blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->blk_write_time); } e->counters.calls.usage += USAGE_EXEC(total_time); - e->counters.info.host = pg_get_client_addr(); if (sys_info) { e->counters.sysinfo.utime = sys_info->utime; @@ -1319,72 +1359,6 @@ pgss_update_entry(pgssEntry *entry, } } -static void -pgss_store_query(uint64 queryid, - const char * query, - CmdType cmd_type, - int query_location, - int query_len, -#if PG_VERSION_NUM > 130000 - JumbleState *jstate, -#else - JumbleState *jstate, -#endif - pgssStoreKind kind) -{ - char *norm_query = NULL; - - if (query_location >= 0) - { - Assert(query_location <= strlen(query)); - query += query_location; - /* Length of 0 (or -1) means "rest of string" */ - if (query_len <= 0) - query_len = strlen(query); - else - Assert(query_len <= strlen(query)); - } - else - { - /* If query location is unknown, distrust query_len as well */ - query_location = 0; - query_len = strlen(query); - } - - /* - * Discard leading and trailing whitespace, too. Use scanner_isspace() - * not libc's isspace(), because we want to match the lexer's behavior. - */ - while (query_len > 0 && scanner_isspace(query[0])) - query++, query_location++, query_len--; - while (query_len > 0 && scanner_isspace(query[query_len - 1])) - query_len--; - - if (jstate) - norm_query = generate_normalized_query(jstate, query, - query_location, - &query_len, - GetDatabaseEncoding()); - /* - * For utility statements, we just hash the query string to get an ID. - */ - if (queryid == UINT64CONST(0)) - queryid = pgss_hash_string(query, query_len); - - pgss_store(queryid, /* query id */ - PGSM_NORMALIZED_QUERY ? (norm_query ? 
norm_query : query) : query, /* query */ - NULL, /* PlanInfo */ - cmd_type, /* CmdType */ - NULL, /* SysInfo */ - NULL, /* ErrorInfo */ - 0, /* totaltime */ - 0, /* rows */ - NULL, /* bufusage */ - NULL, /* walusage */ - jstate, /* JumbleState */ - kind); /* pgssStoreKind */ -} - static void pgss_store_error(uint64 queryid, const char * query, @@ -1396,41 +1370,20 @@ pgss_store_error(uint64 queryid, snprintf(error_info.message, ERROR_MESSAGE_LEN, "%s", edata->message); snprintf(error_info.sqlcode, SQLCODE_LEN, "%s", unpack_sql_state(edata->sqlerrcode)); - pgss_store(queryid, /* query id */ - query, /* query text */ - NULL, /* PlanInfo */ - 0, /* CmdType */ - NULL, /* SysInfo */ - &error_info, /* ErrorInfo */ - 0, /* total_time */ - 0, /* rows */ - NULL, /* bufusage */ - NULL, /* walusage */ - NULL, /* JumbleState */ - PGSS_ERROR); /* pgssStoreKind */ -} - -static void -pgss_store_utility(const char *query, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage) -{ - uint64 queryid = pgss_hash_string(query, strlen(query)); - - pgss_store(queryid, /* query id */ - query, /* query text */ - NULL, /* PlanInfo */ - 0, /* CmdType */ - NULL, /* SysInfo */ - NULL, /* ErrorInfo */ - total_time, /* total_time */ - rows, /* rows */ - bufusage, /* bufusage */ - walusage, /* walusage */ - NULL, /* JumbleState */ - PGSS_FINISHED); /* pgssStoreKind */ + pgss_store(queryid, /* query id */ + query, /* query text */ + 0, /* query location */ + strlen(query), /* query length */ + NULL, /* PlanInfo */ + 0, /* CmdType */ + NULL, /* SysInfo */ + &error_info, /* ErrorInfo */ + 0, /* total_time */ + 0, /* rows */ + NULL, /* bufusage */ + NULL, /* walusage */ + NULL, /* JumbleState */ + PGSS_ERROR); /* pgssStoreKind */ } /* @@ -1445,47 +1398,102 @@ pgss_store_utility(const char *query, */ static void pgss_store(uint64 queryid, - const char *query, - PlanInfo *plan_info, - CmdType cmd_type, - SysInfo *sys_info, - ErrorInfo *error_info, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage, - JumbleState *jstate, - pgssStoreKind kind) + const char *query, + int query_location, + int query_len, + PlanInfo *plan_info, + CmdType cmd_type, + SysInfo *sys_info, + ErrorInfo *error_info, + double total_time, + uint64 rows, + BufferUsage *bufusage, + WalUsage *walusage, + JumbleState *jstate, + pgssStoreKind kind) { HTAB *pgss_hash; - pgssHashKey key; + pgssHashKey key; pgssEntry *entry; pgssSharedState *pgss = pgsm_get_ss(); - char application_name[APPLICATIONNAME_LEN]; - int application_name_len; - bool reset = false; - uint64 bucketid; - uint64 prev_bucket_id; - uint64 userid; - uint64 planid; - uint64 appid; - char comments[512] = ""; - size_t query_len; + char *app_name_ptr; + char app_name[APPLICATIONNAME_LEN] = ""; + int app_name_len = 0; + bool reset = false; + uint64 bucketid; + uint64 prev_bucket_id; + uint64 userid; + uint64 planid; + uint64 appid = 0; + char comments[512] = ""; + char *norm_query = NULL; + bool found_app_name = false; + bool found_client_addr = false; + uint client_addr = 0; /* Monitoring is disabled */ if (!PGSM_ENABLED) return; /* Safety check... */ - if (!IsSystemInitialized() || !pgss_qbuf[pg_atomic_read_u64(&pgss->current_wbucket)]) + if (!IsSystemInitialized()) return; - Assert(query != NULL); - userid = GetUserId(); +#if PG_VERSION_NUM >= 140000 + /* + * Nothing to do if compute_query_id isn't enabled and no other module + * computed a query identifier. 
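/*
 * Illustrative sketch (standalone C, not part of the patch): the error path
 * above copies the error message and SQLSTATE into fixed-size buffers with
 * snprintf() before handing them to pgss_store(); snprintf() guarantees
 * NUL-termination and silently truncates anything longer than the buffer.
 * The buffer sizes and the fill_error_info() helper are invented here; the
 * real code uses ERROR_MESSAGE_LEN / SQLCODE_LEN from pg_stat_monitor.h.
 */
#include <stdio.h>

#define MSG_LEN      64
#define SQLSTATE_LEN 6				/* 5 characters plus NUL, e.g. "23505" */

typedef struct ErrorInfoSketch
{
	char message[MSG_LEN];
	char sqlcode[SQLSTATE_LEN];
} ErrorInfoSketch;

static void
fill_error_info(ErrorInfoSketch *info, const char *message, const char *sqlstate)
{
	/* Truncating copies: never overflow, always NUL-terminated. */
	snprintf(info->message, sizeof(info->message), "%s", message);
	snprintf(info->sqlcode, sizeof(info->sqlcode), "%s", sqlstate);
}

int
main(void)
{
	ErrorInfoSketch info;

	fill_error_info(&info,
					"duplicate key value violates unique constraint",
					"23505");
	printf("sqlcode=%s message=%s\n", info.sqlcode, info.message);
	return 0;
}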
+ */ + if (queryid == UINT64CONST(0)) + return; +#endif - application_name_len = pg_get_application_name(application_name); - planid = plan_info ? plan_info->planid: 0; - appid = djb2_hash((unsigned char *)application_name, application_name_len); + query = CleanQuerytext(query, &query_location, &query_len); + +#if PG_VERSION_NUM < 140000 + /* + * For utility statements, we just hash the query string to get an ID. + */ + if (queryid == UINT64CONST(0)) + { + queryid = pgss_hash_string(query, query_len); + /* + * If we are unlucky enough to get a hash of zero(invalid), use + * queryID as 2 instead, queryID 1 is already in use for normal + * statements. + */ + if (queryid == UINT64CONST(0)) + queryid = UINT64CONST(2); + } +#endif + + Assert(query != NULL); + if (kind == PGSS_ERROR) + { + int sec_ctx; + GetUserIdAndSecContext((Oid *)&userid, &sec_ctx); + } + else + userid = GetUserId(); + + /* Try to read application name from GUC directly */ + if (application_name && *application_name) + { + app_name_ptr = application_name; + appid = djb2_hash_str((unsigned char *)application_name, &app_name_len); + } + else + { + app_name_len = pg_get_application_name(app_name, &found_app_name); + if (found_app_name) + appid = djb2_hash((unsigned char *)app_name, app_name_len); + app_name_ptr = app_name; + } + + if (!found_client_addr) + client_addr = pg_get_client_addr(&found_client_addr); + + planid = plan_info ? plan_info->planid : 0; extract_query_comments(query, comments, sizeof(comments)); @@ -1499,13 +1507,13 @@ pgss_store(uint64 queryid, key.userid = userid; key.dbid = MyDatabaseId; key.queryid = queryid; - key.ip = pg_get_client_addr(); + key.ip = client_addr; key.planid = planid; key.appid = appid; #if PG_VERSION_NUM < 140000 key.toplevel = 1; #else - key.toplevel = (nested_level == 0); + key.toplevel = ((exec_nested_level + plan_nested_level) == 0); #endif pgss_hash = pgsm_get_hash(); @@ -1514,42 +1522,88 @@ pgss_store(uint64 queryid, entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); if (!entry) { - uint64 prev_qbuf_len; - /* position in which the query's text was inserted into the query buffer. */ - size_t qpos = 0; + pgssQueryEntry *query_entry; + bool query_found = false; + uint64 prev_qbuf_len = 0; + HTAB *pgss_query_hash; - query_len = strlen(query); - if (query_len > PGSM_QUERY_MAX_LEN) - query_len = PGSM_QUERY_MAX_LEN; + pgss_query_hash = pgsm_get_query_hash(); + + /* + * Create a new, normalized query string if caller asked. We don't + * need to hold the lock while doing this work. (Note: in any case, + * it's possible that someone else creates a duplicate hashtable entry + * in the interval where we don't hold the lock below. That case is + * handled by entry_alloc. + */ + if (jstate && PGSM_NORMALIZED_QUERY) + { + LWLockRelease(pgss->lock); + norm_query = generate_normalized_query(jstate, query, + query_location, + &query_len, + GetDatabaseEncoding()); + LWLockAcquire(pgss->lock, LW_SHARED); + } + + query_entry = hash_search(pgss_query_hash, &queryid, HASH_ENTER_NULL, &query_found); + if (query_entry == NULL) + { + LWLockRelease(pgss->lock); + if (norm_query) + pfree(norm_query); + pgsm_log_error("pgss_store: out of memory (pgss_query_hash)."); + return; + } + else if (!query_found) + { + /* New query, truncate length if necessary. 
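/*
 * Illustrative sketch (standalone C, not part of the patch): the new-entry
 * path above first consults a queries hash table keyed by queryid so each
 * distinct query text is written to the shared query buffer only once;
 * later entries simply reuse the stored position.  The tiny linear-scan
 * registry below shows the same "enter if absent, otherwise reuse" idea;
 * all names are invented, and the real code uses a shared-memory hash table
 * with HASH_ENTER_NULL.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REGISTRY_SLOTS 8

typedef struct QueryRef
{
	bool     used;
	uint64_t queryid;
	size_t   query_pos;			/* offset of the text in the shared buffer */
} QueryRef;

static QueryRef registry[REGISTRY_SLOTS];

/*
 * Return the slot for queryid, creating it if necessary.  *found reports
 * whether the id was already present (so the caller can skip re-saving the
 * query text).  Returns NULL when the table is full, mirroring the
 * out-of-memory case handled above.
 */
static QueryRef *
registry_enter(uint64_t queryid, bool *found)
{
	QueryRef *free_slot = NULL;

	for (int i = 0; i < REGISTRY_SLOTS; i++)
	{
		if (registry[i].used && registry[i].queryid == queryid)
		{
			*found = true;
			return &registry[i];
		}
		if (!registry[i].used && free_slot == NULL)
			free_slot = &registry[i];
	}

	*found = false;
	if (free_slot)
	{
		free_slot->used = true;
		free_slot->queryid = queryid;
		free_slot->query_pos = 0;
	}
	return free_slot;			/* NULL means "table full" */
}

int
main(void)
{
	bool found;
	QueryRef *ref;

	ref = registry_enter(42, &found);
	ref->query_pos = 128;		/* pretend the text was just saved here */
	printf("first lookup: found=%d pos=%zu\n", found, ref->query_pos);

	ref = registry_enter(42, &found);
	printf("second lookup: found=%d pos=%zu\n", found, ref->query_pos);
	return 0;
}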
*/ + if (query_len > PGSM_QUERY_MAX_LEN) + query_len = PGSM_QUERY_MAX_LEN; + } /* Need exclusive lock to make a new hashtable entry - promote */ LWLockRelease(pgss->lock); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - /* - * Save current query buffer length, if we fail to add a new - * new entry to the hash table then we must restore the - * original length. - */ - memcpy(&prev_qbuf_len, pgss_qbuf[bucketid], sizeof(prev_qbuf_len)); - if (!SaveQueryText(bucketid, queryid, pgss_qbuf[bucketid], query, query_len, &qpos)) + if (!query_found) { - LWLockRelease(pgss->lock); - elog(DEBUG1, "pg_stat_monitor: insufficient shared space for query."); - return; + if (!SaveQueryText(bucketid, + queryid, + pgss_qbuf, + norm_query ? norm_query : query, + query_len, + &query_entry->query_pos)) + { + LWLockRelease(pgss->lock); + if (norm_query) + pfree(norm_query); + pgsm_log_error("pgss_store: insufficient shared space for query."); + return; + } + /* + * Save current query buffer length, if we fail to add a new + * new entry to the hash table then we must restore the + * original length. + */ + memcpy(&prev_qbuf_len, pgss_qbuf, sizeof(prev_qbuf_len)); } - /* OK to create a new hashtable entry */ + /* OK to create a new hashtable entry */ entry = hash_entry_alloc(pgss, &key, GetDatabaseEncoding()); if (entry == NULL) { - /* Restore previous query buffer length. */ - memcpy(pgss_qbuf[bucketid], &prev_qbuf_len, sizeof(prev_qbuf_len)); + if (!query_found) + { + /* Restore previous query buffer length. */ + memcpy(pgss_qbuf, &prev_qbuf_len, sizeof(prev_qbuf_len)); + } LWLockRelease(pgss->lock); - elog(DEBUG1, "pg_stat_monitor: out of memory"); + if (norm_query) + pfree(norm_query); return; } - entry->query_pos = qpos; + entry->query_pos = query_entry->query_pos; } if (jstate == NULL) @@ -1567,9 +1621,14 @@ pgss_store(uint64 queryid, bufusage, /* bufusage */ walusage, /* walusage */ reset, /* reset */ - kind); /* kind */ + kind, /* kind */ + app_name_ptr, + app_name_len); + } LWLockRelease(pgss->lock); + if (norm_query) + pfree(norm_query); } /* * Reset all statement statistics. @@ -1585,11 +1644,10 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS) errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); hash_entry_dealloc(-1, -1, NULL); - /* Reset query buffers. */ - for (size_t i = 0; i < PGSM_MAX_BUCKETS; ++i) - { - *(uint64 *)pgss_qbuf[i] = 0; - } + + /* Reset query buffer. */ + *(uint64 *)pgss_qbuf = 0; + #ifdef BENCHMARK for (int i = STATS_START; i < STATS_END; ++i) { pg_hook_stats[i].min_time = 0; @@ -1643,8 +1701,8 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, char parentid_txt[32]; pgssSharedState *pgss = pgsm_get_ss(); HTAB *pgss_hash = pgsm_get_hash(); - char *query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); - char *parent_query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); + char *query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN + 1); + char *parent_query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN + 1); /* Safety check... 
*/ if (!IsSystemInitialized()) @@ -1699,7 +1757,6 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, uint64 userid = entry->key.userid; uint64 ip = entry->key.ip; uint64 planid = entry->key.planid; - unsigned char *buf = pgss_qbuf[bucketid]; #if PG_VERSION_NUM < 140000 bool toplevel = 1; bool is_allowed_role = is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS); @@ -1708,7 +1765,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, bool toplevel = entry->key.toplevel; #endif - if (read_query(buf, queryid, query_txt, entry->query_pos) == 0) + if (read_query(pgss_qbuf, queryid, query_txt, entry->query_pos) == 0) { int rc; rc = read_query_buffer(bucketid, queryid, query_txt, entry->query_pos); @@ -1735,8 +1792,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (tmp.info.parentid != UINT64CONST(0)) { - int rc = 0; - if (read_query(buf, tmp.info.parentid, parent_query_txt, 0) == 0) + if (read_query(pgss_qbuf, tmp.info.parentid, parent_query_txt, 0) == 0) { rc = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt, 0); if (rc != 1) @@ -1788,7 +1844,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (enc != query_txt) pfree(enc); /* plan at column number 7 */ - if (planid && strlen(tmp.planinfo.plan_text) > 0) + if (planid && tmp.planinfo.plan_text[0]) values[i++] = CStringGetTextDatum(tmp.planinfo.plan_text); else nulls[i++] = true; @@ -1992,11 +2048,12 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = BoolGetDatum(toplevel); tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - pfree(query_txt); - pfree(parent_query_txt); /* clean up and return the tuplestore */ LWLockRelease(pgss->lock); + pfree(query_txt); + pfree(parent_query_txt); + tuplestore_donestoring(tupstore); } @@ -2055,8 +2112,22 @@ get_next_wbucket(pgssSharedState *pgss) LWLockAcquire(pgss->lock, LW_EXCLUSIVE); hash_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf); - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)new_bucket_id); - unlink(file_name); + if (pgss->overflow) + { + pgss->n_bucket_cycles += 1; + if (pgss->n_bucket_cycles >= PGSM_MAX_BUCKETS) + { + /* + * A full rotation of PGSM_MAX_BUCKETS buckets happened since + * we detected a query buffer overflow. + * Reset overflow state and remove the dump file. + */ + pgss->overflow = false; + pgss->n_bucket_cycles = 0; + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); + unlink(file_name); + } + } LWLockRelease(pgss->lock); @@ -2733,6 +2804,45 @@ RecordConstLocation(JumbleState *jstate, int location) jstate->clocations_count++; } } + +static const char * +CleanQuerytext(const char *query, int *location, int *len) +{ + int query_location = *location; + int query_len = *len; + + /* First apply starting offset, unless it's -1 (unknown). */ + if (query_location >= 0) + { + Assert(query_location <= strlen(query)); + query += query_location; + /* Length of 0 (or -1) means "rest of string" */ + if (query_len <= 0) + query_len = strlen(query); + else + Assert(query_len <= strlen(query)); + } + else + { + /* If query location is unknown, distrust query_len as well */ + query_location = 0; + query_len = strlen(query); + } + + /* + * Discard leading and trailing whitespace, too. Use scanner_isspace() + * not libc's isspace(), because we want to match the lexer's behavior. 
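/*
 * Illustrative sketch (standalone C, not part of the patch): CleanQuerytext()
 * above applies the reported query location and length and then strips
 * leading/trailing whitespace.  This sketch keeps only the trimming part and
 * uses libc's isspace(); the real code uses scanner_isspace() so that it
 * matches the PostgreSQL lexer's idea of whitespace.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/*
 * Return a pointer into `query` with surrounding whitespace stripped,
 * writing the trimmed length to *len.
 */
static const char *
trim_query(const char *query, int *len)
{
	int query_len = (int) strlen(query);

	while (query_len > 0 && isspace((unsigned char) query[0]))
		query++, query_len--;
	while (query_len > 0 && isspace((unsigned char) query[query_len - 1]))
		query_len--;

	*len = query_len;
	return query;
}

int
main(void)
{
	int len;
	const char *q = trim_query("   SELECT 1 AS num;  \n", &len);

	printf("[%.*s] len=%d\n", len, q, len);
	return 0;
}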
+ */ + while (query_len > 0 && scanner_isspace(query[0])) + query++, query_location++, query_len--; + while (query_len > 0 && scanner_isspace(query[query_len - 1])) + query_len--; + + *location = query_location; + *len = query_len; + + return query; +} #endif /* @@ -2987,18 +3097,16 @@ intarray_get_datum(int32 arr[], int len) int j; char str[1024]; char tmp[10]; - bool first = true; - memset(str, 0, sizeof(str)); + str[0] = '\0'; /* Need to calculate the actual size, and avoid unnessary memory usage */ for (j = 0; j < len; j++) { - if (first) + if (!str[0]) { snprintf(tmp, 10, "%d", arr[j]); strcat(str,tmp); - first = false; continue; } snprintf(tmp, 10, ",%d", arr[j]); @@ -3103,20 +3211,33 @@ SaveQueryText(uint64 bucketid, case OVERFLOW_TARGET_DISK: { bool dump_ok; + pgssSharedState *pgss = pgsm_get_ss(); + + if (pgss->overflow) + { + pgsm_log_error("query buffer overflowed twice"); + return false; + } /* * If the query buffer is empty, there is nothing to dump, this also - * means that the current query length exceeds MAX_QUERY_BUFFER_BUCKET. + * means that the current query length exceeds MAX_QUERY_BUF. */ if (buf_len <= sizeof (uint64)) return false; - dump_ok = dump_queries_buffer(bucketid, buf, MAX_QUERY_BUFFER_BUCKET); + dump_ok = dump_queries_buffer(bucketid, buf, MAX_QUERY_BUF); buf_len = sizeof (uint64); + if (dump_ok) + { + pgss->overflow = true; + pgss->n_bucket_cycles = 0; + } + /* * We must check for overflow again, as the query length may - * exceed the size allocated to the buffer (MAX_QUERY_BUFFER_BUCKET). + * exceed the total size allocated to the buffer (MAX_QUERY_BUF). */ if (QUERY_BUFFER_OVERFLOW(buf_len, query_len)) { @@ -3279,9 +3400,10 @@ pg_stat_monitor_hook_stats(PG_FUNCTION_ARGS) } void -set_qbuf(int i, unsigned char *buf) +set_qbuf(unsigned char *buf) { - pgss_qbuf[i] = buf; + pgss_qbuf = buf; + *(uint64 *)pgss_qbuf = 0; } #ifdef BENCHMARK @@ -3303,6 +3425,10 @@ pgsm_emit_log_hook(ErrorData *edata) if (IsParallelWorker()) return; + /* Check if PostgreSQL has finished its own bootstraping code. */ + if (MyProc == NULL) + return; + if ((edata->elevel == ERROR || edata->elevel == WARNING || edata->elevel == INFO || edata->elevel == DEBUG1)) { uint64 queryid = 0; @@ -3334,7 +3460,7 @@ dump_queries_buffer(int bucket_id, unsigned char *buf, int buf_len) int off = 0; int tries = 0; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, bucket_id); + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); fd = OpenTransientFile(file_name, O_RDWR | O_CREAT | O_APPEND | PG_BINARY); if (fd < 0) { @@ -3390,18 +3516,18 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos) bool done = false; bool found = false; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, bucket_id); + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); fd = OpenTransientFile(file_name, O_RDONLY | PG_BINARY); if (fd < 0) goto exit; - buf = (unsigned char*) palloc(MAX_QUERY_BUFFER_BUCKET); + buf = (unsigned char*) palloc(MAX_QUERY_BUF); while (!done) { off = 0; - /* read a chunck of MAX_QUERY_BUFFER_BUCKET size. */ + /* read a chunck of MAX_QUERY_BUF size. 
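/*
 * Illustrative sketch (standalone C, not part of the patch): read_query_buffer()
 * above reads the dump file in MAX_QUERY_BUF-sized chunks and retries when
 * read() is interrupted.  The read_full() helper below shows that
 * retry-until-full-or-EOF loop on a temporary file; the helper name and the
 * buffer size are invented for the example.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Read up to `len` bytes from fd into buf, retrying on EINTR and on short
 * reads.  Returns the number of bytes actually read (less than `len` only
 * at end of file), or -1 on error.
 */
static ssize_t
read_full(int fd, void *buf, size_t len)
{
	size_t off = 0;

	while (off < len)
	{
		ssize_t nread = read(fd, (char *) buf + off, len - off);

		if (nread == -1)
		{
			if (errno == EINTR)
				continue;		/* interrupted, try again */
			return -1;
		}
		if (nread == 0)
			break;				/* end of file */
		off += (size_t) nread;
	}
	return (ssize_t) off;
}

int
main(void)
{
	FILE *tmp = tmpfile();
	char out[64] = {0};
	ssize_t got;

	if (!tmp)
		return 1;

	fputs("SELECT 1 AS num", tmp);
	rewind(tmp);				/* flushes and repositions the stream */

	got = read_full(fileno(tmp), out, sizeof(out) - 1);
	printf("read %zd bytes: %s\n", got, out);
	fclose(tmp);
	return 0;
}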
*/ do { - nread = read(fd, buf + off, MAX_QUERY_BUFFER_BUCKET - off); + nread = read(fd, buf + off, MAX_QUERY_BUF - off); if (nread == -1) { if (errno == EINTR && tries++ < 3) /* read() was interrupted, attempt to read again (max attempts=3) */ @@ -3416,9 +3542,9 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos) } off += nread; - } while (off < MAX_QUERY_BUFFER_BUCKET); + } while (off < MAX_QUERY_BUF); - if (off == MAX_QUERY_BUFFER_BUCKET) + if (off == MAX_QUERY_BUF) { /* we have a chunck, scan it looking for queryid. */ if (read_query(buf, queryid, query_txt, pos) != 0) @@ -3431,7 +3557,7 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos) } else /* - * Either done=true or file has a size not multiple of MAX_QUERY_BUFFER_BUCKET. + * Either done=true or file has a size not multiple of MAX_QUERY_BUF. * It is safe to assume that the file was truncated or corrupted. */ break; @@ -3617,7 +3743,24 @@ static uint64 djb2_hash(unsigned char *str, size_t len) uint64 hash = 5381LLU; while (len--) - hash = ((hash << 5) + hash) ^ *str++; // hash(i - 1) * 33 ^ str[i] + hash = ((hash << 5) + hash) ^ *str++; // hash(i - 1) * 33 ^ str[i] + + return hash; +} + +static uint64 djb2_hash_str(unsigned char *str, int *out_len) +{ + uint64 hash = 5381LLU; + unsigned char *start = str; + unsigned char c; + + while ((c = *str) != '\0') + { + hash = ((hash << 5) + hash) ^ c; // hash(i - 1) * 33 ^ str[i] + ++str; + } + + *out_len = str - start; return hash; } diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 84ed936..0d96fe7 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -87,9 +87,8 @@ #define MAX_QUERY_BUF (PGSM_QUERY_SHARED_BUFFER * 1024 * 1024) #define MAX_BUCKETS_MEM (PGSM_MAX * 1024 * 1024) #define BUCKETS_MEM_OVERFLOW() ((hash_get_num_entries(pgss_hash) * sizeof(pgssEntry)) >= MAX_BUCKETS_MEM) -#define MAX_QUERY_BUFFER_BUCKET MAX_QUERY_BUF / PGSM_MAX_BUCKETS #define MAX_BUCKET_ENTRIES (MAX_BUCKETS_MEM / sizeof(pgssEntry)) -#define QUERY_BUFFER_OVERFLOW(x,y) ((x + y + sizeof(uint64) + sizeof(uint64)) > MAX_QUERY_BUFFER_BUCKET) +#define QUERY_BUFFER_OVERFLOW(x,y) ((x + y + sizeof(uint64) + sizeof(uint64)) > MAX_QUERY_BUF) #define QUERY_MARGIN 100 #define MIN_QUERY_LEN 10 #define SQLCODE_LEN 20 @@ -161,7 +160,7 @@ typedef enum AGG_KEY #define MAX_QUERY_LEN 1024 -/* shared nenory storage for the query */ +/* shared memory storage for the query */ typedef struct CallTime { double total_time; /* total execution time, in msec */ @@ -171,27 +170,26 @@ typedef struct CallTime double sum_var_time; /* sum of variances in execution time in msec */ } CallTime; -typedef struct pgssQueryHashKey -{ - uint64 bucket_id; /* bucket number */ - uint64 queryid; /* query identifier */ - uint64 userid; /* user OID */ - uint64 dbid; /* database OID */ - uint64 ip; /* client ip address */ - uint64 appid; /* hash of application name */ -} pgssQueryHashKey; - +/* + * Entry type for queries hash table (query ID). + * + * We use a hash table to keep track of query IDs that have their + * corresponding query text added to the query buffer (pgsm_query_shared_buffer). + * + * This allow us to avoid adding duplicated queries to the buffer, therefore + * leaving more space for other queries and saving some CPU. + */ typedef struct pgssQueryEntry { - pgssQueryHashKey key; /* hash key of entry - MUST BE FIRST */ - uint64 pos; /* bucket number */ - uint64 state; + uint64 queryid; /* query identifier, also the key. 
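/*
 * Reference sketch (standalone C): the patch adds djb2_hash_str(), which
 * hashes the application_name GUC and reports the string length in the same
 * pass so the caller avoids a separate strlen().  The function below mirrors
 * that djb2 variant (hash(i) = hash(i - 1) * 33 ^ str[i]); only the main()
 * driver is added for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
djb2_hash_str(const unsigned char *str, int *out_len)
{
	uint64_t hash = 5381ULL;
	const unsigned char *start = str;
	unsigned char c;

	while ((c = *str) != '\0')
	{
		hash = ((hash << 5) + hash) ^ c;	/* hash(i - 1) * 33 ^ str[i] */
		++str;
	}

	*out_len = (int) (str - start);
	return hash;
}

int
main(void)
{
	int len;
	uint64_t h = djb2_hash_str((const unsigned char *) "psql", &len);

	printf("appid=%llu len=%d\n", (unsigned long long) h, len);
	return 0;
}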
*/ + size_t query_pos; /* query location within query buffer */ } pgssQueryEntry; typedef struct PlanInfo { uint64 planid; /* plan identifier */ char plan_text[PLAN_TEXT_LEN]; /* plan text */ + size_t plan_len; /* strlen(plan_text) */ } PlanInfo; typedef struct pgssHashKey @@ -208,10 +206,6 @@ typedef struct pgssHashKey typedef struct QueryInfo { - uint64 queryid; /* query identifier */ - Oid userid; /* user OID */ - Oid dbid; /* database OID */ - uint host; /* client IP */ uint64 parentid; /* parent queryid of current query*/ int64 type; /* type of query, options are query, info, warning, error, fatal */ char application_name[APPLICATIONNAME_LEN]; @@ -311,8 +305,22 @@ typedef struct pgssSharedState pg_atomic_uint64 current_wbucket; pg_atomic_uint64 prev_bucket_usec; uint64 bucket_entry[MAX_BUCKETS]; - int64 query_buf_size_bucket; char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ + LWLock *errors_lock; /* protects errors hashtable search/modification */ + /* + * These variables are used when pgsm_overflow_target is ON. + * + * overflow is set to true when the query buffer overflows. + * + * n_bucket_cycles counts the number of times we changed bucket + * since the query buffer overflowed. When it reaches pgsm_max_buckets + * we remove the dump file, also reset the counter. + * + * This allows us to avoid having a large file on disk that would also + * slowdown queries to the pg_stat_monitor view. + */ + bool overflow; + size_t n_bucket_cycles; } pgssSharedState; #define ResetSharedState(x) \ @@ -382,21 +390,20 @@ int pgsm_get_bucket_size(void); pgssSharedState* pgsm_get_ss(void); HTAB *pgsm_get_plan_hash(void); HTAB *pgsm_get_hash(void); +HTAB *pgsm_get_query_hash(void); HTAB *pgsm_get_plan_hash(void); void hash_entry_reset(void); void hash_query_entryies_reset(void); void hash_query_entries(); void hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); -void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); +void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer); pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding); Size hash_memsize(void); int read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos); uint64 read_query(unsigned char *buf, uint64 queryid, char * query, size_t pos); -pgssQueryEntry* hash_find_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid); -pgssQueryEntry* hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid); void pgss_startup(void); -void set_qbuf(int i, unsigned char *); +void set_qbuf(unsigned char *); /* hash_query.c */ void pgss_startup(void); diff --git a/pgsm_errors.c b/pgsm_errors.c new file mode 100644 index 0000000..878712a --- /dev/null +++ b/pgsm_errors.c @@ -0,0 +1,223 @@ +/*------------------------------------------------------------------------- + * + * pgsm_errors.c + * Track pg_stat_monitor internal error messages. 
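/*
 * Illustrative sketch (standalone C, not part of the patch): pgssSharedState
 * above gains `overflow` and `n_bucket_cycles`.  Per the comment, once the
 * query buffer has been dumped to disk the dump file is kept for one full
 * rotation of pgsm_max_buckets buckets and then removed.  The simulation
 * below shows that bookkeeping; the names, the MAX_BUCKETS value, and the
 * printf() standing in for unlink() are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BUCKETS 4			/* stands in for pgsm_max_buckets */

typedef struct OverflowState
{
	bool   overflow;			/* query buffer was dumped to disk */
	size_t n_bucket_cycles;		/* bucket changes since the dump */
} OverflowState;

/* Called when the query buffer is dumped to disk. */
static void
on_buffer_dump(OverflowState *st)
{
	st->overflow = true;
	st->n_bucket_cycles = 0;
}

/* Called every time the current bucket changes. */
static void
on_bucket_change(OverflowState *st)
{
	if (!st->overflow)
		return;

	st->n_bucket_cycles += 1;
	if (st->n_bucket_cycles >= MAX_BUCKETS)
	{
		/* A full rotation happened: forget the overflow, drop the dump. */
		st->overflow = false;
		st->n_bucket_cycles = 0;
		printf("would unlink the dump file here\n");
	}
}

int
main(void)
{
	OverflowState st = {false, 0};
	int i;

	on_buffer_dump(&st);
	for (i = 0; i < MAX_BUCKETS; i++)
		on_bucket_change(&st);

	printf("overflow=%d cycles=%zu\n", st.overflow, st.n_bucket_cycles);
	return 0;
}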
+ * + * Copyright © 2021, Percona LLC and/or its affiliates + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * + * Portions Copyright (c) 1994, The Regents of the University of California + * + * IDENTIFICATION + * contrib/pg_stat_monitor/pgsm_errors.c + * + *------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pg_stat_monitor.h" +#include "pgsm_errors.h" + + +PG_FUNCTION_INFO_V1(pg_stat_monitor_errors); +PG_FUNCTION_INFO_V1(pg_stat_monitor_reset_errors); + + +/* + * Maximum number of error messages tracked. + * This should be set to a sensible value in order to track + * the different type of errors that pg_stat_monitor may + * report, e.g. out of memory. + */ +#define PSGM_ERRORS_MAX 128 + +static HTAB *pgsm_errors_ht = NULL; + +void psgm_errors_init(void) +{ + HASHCTL info; +#if PG_VERSION_NUM >= 140000 + int flags = HASH_ELEM | HASH_STRINGS; +#else + int flags = HASH_ELEM | HASH_BLOBS; +#endif + + + memset(&info, 0, sizeof(info)); + info.keysize = ERROR_MSG_MAX_LEN; + info.entrysize = sizeof(ErrorEntry); + pgsm_errors_ht = ShmemInitHash("pg_stat_monitor: errors hashtable", + PSGM_ERRORS_MAX, /* initial size */ + PSGM_ERRORS_MAX, /* maximum size */ + &info, + flags); +} + +size_t pgsm_errors_size(void) +{ + return hash_estimate_size(PSGM_ERRORS_MAX, sizeof(ErrorEntry)); +} + +void pgsm_log(PgsmLogSeverity severity, const char *format, ...) +{ + char key[ERROR_MSG_MAX_LEN]; + ErrorEntry *entry; + bool found = false; + va_list ap; + int n; + struct timeval tv; + struct tm *lt; + pgssSharedState *pgss; + + va_start(ap, format); + n = vsnprintf(key, ERROR_MSG_MAX_LEN, format, ap); + va_end(ap); + + if (n < 0) + return; + + pgss = pgsm_get_ss(); + LWLockAcquire(pgss->errors_lock, LW_EXCLUSIVE); + + entry = (ErrorEntry *) hash_search(pgsm_errors_ht, key, HASH_ENTER_NULL, &found); + if (!entry) + { + LWLockRelease(pgss->errors_lock); + /* + * We're out of memory, can't track this error message. + */ + return; + } + + if (!found) + { + entry->severity = severity; + entry->calls = 0; + } + + /* Update message timestamp. */ + gettimeofday(&tv, NULL); + lt = localtime(&tv.tv_sec); + snprintf(entry->time, sizeof(entry->time), + "%04d-%02d-%02d %02d:%02d:%02d", + lt->tm_year + 1900, + lt->tm_mon + 1, + lt->tm_mday, + lt->tm_hour, + lt->tm_min, + lt->tm_sec); + + entry->calls++; + + LWLockRelease(pgss->errors_lock); +} + +/* + * Clear all entries from the hash table. + */ +Datum +pg_stat_monitor_reset_errors(PG_FUNCTION_ARGS) +{ + HASH_SEQ_STATUS hash_seq; + ErrorEntry *entry; + pgssSharedState *pgss = pgsm_get_ss(); + + /* Safety check... */ + if (!IsSystemInitialized()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); + + LWLockAcquire(pgss->errors_lock, LW_EXCLUSIVE); + + hash_seq_init(&hash_seq, pgsm_errors_ht); + while ((entry = hash_seq_search(&hash_seq)) != NULL) + entry = hash_search(pgsm_errors_ht, &entry->message, HASH_REMOVE, NULL); + + LWLockRelease(pgss->errors_lock); + PG_RETURN_VOID(); +} + +/* + * Invoked when users query the view pg_stat_monitor_errors. + * This function creates tuples with error messages from data present in + * the hash table, then return the dataset to the caller. 
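/*
 * Illustrative sketch (standalone C, not part of the patch): pgsm_log()
 * above formats the message with vsnprintf(), uses the formatted text as
 * the hash key, bumps a per-message call counter, and refreshes a wall-clock
 * timestamp.  The sketch below aggregates messages the same way into a small
 * fixed array instead of a shared-memory hash table; all names and sizes are
 * invented for the example.
 */
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define LOG_MSG_LEN 128
#define LOG_SLOTS   8

typedef struct LogEntrySketch
{
	char message[LOG_MSG_LEN];	/* formatted text doubles as the key */
	long calls;
	char time[20];				/* "YYYY-MM-DD HH:MM:SS" */
} LogEntrySketch;

static LogEntrySketch log_table[LOG_SLOTS];

static void
log_error(const char *fmt, ...)
{
	char key[LOG_MSG_LEN];
	va_list ap;
	int i;

	va_start(ap, fmt);
	vsnprintf(key, sizeof(key), fmt, ap);
	va_end(ap);

	for (i = 0; i < LOG_SLOTS; i++)
	{
		LogEntrySketch *e = &log_table[i];
		time_t now;

		if (e->calls > 0 && strcmp(e->message, key) != 0)
			continue;			/* slot occupied by a different message */

		if (e->calls == 0)
			snprintf(e->message, sizeof(e->message), "%s", key);

		/* Refresh the timestamp and bump the per-message counter. */
		now = time(NULL);
		strftime(e->time, sizeof(e->time), "%Y-%m-%d %H:%M:%S",
				 localtime(&now));
		e->calls++;
		return;
	}
	/* Table full: drop the message, as the real code does on OOM. */
}

int
main(void)
{
	log_error("out of memory (%s)", "pgss_query_hash");
	log_error("out of memory (%s)", "pgss_query_hash");
	printf("%s | calls=%ld | %s\n",
		   log_table[0].message, log_table[0].calls, log_table[0].time);
	return 0;
}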
+ */ +Datum +pg_stat_monitor_errors(PG_FUNCTION_ARGS) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate *tupstore; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + HASH_SEQ_STATUS hash_seq; + ErrorEntry *error_entry; + pgssSharedState *pgss = pgsm_get_ss(); + + /* Safety check... */ + if (!IsSystemInitialized()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("pg_stat_monitor: set-valued function called in context that cannot accept a set"))); + + /* Switch into long-lived context to construct returned data structures */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "pg_stat_monitor: return type must be a row type"); + + if (tupdesc->natts != 4) + elog(ERROR, "pg_stat_monitor: incorrect number of output arguments, required 3, found %d", tupdesc->natts); + + tupstore = tuplestore_begin_heap(true, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + MemoryContextSwitchTo(oldcontext); + + LWLockAcquire(pgss->errors_lock, LW_SHARED); + + hash_seq_init(&hash_seq, pgsm_errors_ht); + while ((error_entry = hash_seq_search(&hash_seq)) != NULL) + { + Datum values[4]; + bool nulls[4]; + int i = 0; + memset(values, 0, sizeof(values)); + memset(nulls, 0, sizeof(nulls)); + + values[i++] = Int64GetDatumFast(error_entry->severity); + values[i++] = CStringGetTextDatum(error_entry->message); + values[i++] = CStringGetTextDatum(error_entry->time); + values[i++] = Int64GetDatumFast(error_entry->calls); + + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + + LWLockRelease(pgss->errors_lock); + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); + + return (Datum)0; +} \ No newline at end of file diff --git a/regression/expected/application_name.out b/regression/expected/application_name.out index 9d40e02..e200c18 100644 --- a/regression/expected/application_name.out +++ b/regression/expected/application_name.out @@ -12,11 +12,11 @@ SELECT 1 AS num; (1 row) SELECT query,application_name FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | application_name ---------------------------------------------------------------------------------+----------------------------- - SELECT $1 AS num | pg_regress/application_name - SELECT pg_stat_monitor_reset(); | pg_regress/application_name - SELECT query,application_name FROM pg_stat_monitor ORDER BY query COLLATE "C"; | pg_regress/application_name + query | application_name +-------------------------------------------------------------------------------+----------------------------- + SELECT $1 AS num | pg_regress/application_name + SELECT pg_stat_monitor_reset() | pg_regress/application_name + SELECT query,application_name FROM pg_stat_monitor ORDER BY query COLLATE "C" | pg_regress/application_name (3 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/application_name_unique.out b/regression/expected/application_name_unique.out new file mode 100644 index 0000000..15ba62f --- /dev/null 
+++ b/regression/expected/application_name_unique.out @@ -0,0 +1,38 @@ +Create EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +Set application_name = 'naeem' ; +SELECT 1 AS num; + num +----- + 1 +(1 row) + +Set application_name = 'psql' ; +SELECT 1 AS num; + num +----- + 1 +(1 row) + +SELECT query,application_name FROM pg_stat_monitor ORDER BY query, application_name COLLATE "C"; + query | application_name +--------------------------------+------------------------------------ + SELECT $1 AS num | naeem + SELECT $1 AS num | psql + SELECT pg_stat_monitor_reset() | pg_regress/application_name_unique + Set application_name = 'naeem' | naeem + Set application_name = 'psql' | psql +(5 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/basic.out b/regression/expected/basic.out index 04b5eb8..adea370 100644 --- a/regression/expected/basic.out +++ b/regression/expected/basic.out @@ -12,11 +12,11 @@ SELECT 1 AS num; (1 row) SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query ---------------------------------------------------------------- + query +-------------------------------------------------------------- SELECT $1 AS num - SELECT pg_stat_monitor_reset(); - SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C"; + SELECT pg_stat_monitor_reset() + SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C" (3 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/cmd_type.out b/regression/expected/cmd_type.out index aba4be0..086d230 100644 --- a/regression/expected/cmd_type.out +++ b/regression/expected/cmd_type.out @@ -24,19 +24,19 @@ SELECT b FROM t2 FOR UPDATE; TRUNCATE t1; DROP TABLE t1; SELECT query, cmd_type, cmd_type_text FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | cmd_type | cmd_type_text ------------------------------------------------------------------------------------------+----------+--------------- - CREATE TABLE t1 (a INTEGER); | 0 | - CREATE TABLE t2 (b INTEGER); | 0 | - DELETE FROM t1; | 4 | DELETE - DROP TABLE t1; | 0 | - INSERT INTO t1 VALUES($1) | 3 | INSERT - SELECT a FROM t1; | 1 | SELECT - SELECT b FROM t2 FOR UPDATE; | 1 | SELECT - SELECT pg_stat_monitor_reset(); | 1 | SELECT - SELECT query, cmd_type, cmd_type_text FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 | SELECT - TRUNCATE t1; | 0 | - UPDATE t1 SET a = $1 | 2 | UPDATE + query | cmd_type | cmd_type_text +----------------------------------------------------------------------------------------+----------+--------------- + CREATE TABLE t1 (a INTEGER) | 0 | + CREATE TABLE t2 (b INTEGER) | 0 | + DELETE FROM t1 | 4 | DELETE + DROP TABLE t1 | 0 | + INSERT INTO t1 VALUES($1) | 3 | INSERT + SELECT a FROM t1 | 1 | SELECT + SELECT b FROM t2 FOR UPDATE | 1 | SELECT + SELECT pg_stat_monitor_reset() | 1 | SELECT + SELECT query, cmd_type, cmd_type_text FROM pg_stat_monitor ORDER BY query COLLATE "C" | 1 | SELECT + TRUNCATE t1 | 0 | + UPDATE t1 SET a = $1 | 2 | UPDATE (11 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/counters.out b/regression/expected/counters.out index ebf26f7..40fed48 100644 --- a/regression/expected/counters.out +++ b/regression/expected/counters.out @@ -36,11 +36,11 @@ SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; (0 rows) SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | 
calls -----------------------------------------------------------------------------------+------- - SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; | 4 - SELECT pg_stat_monitor_reset(); | 1 - SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 + query | calls +---------------------------------------------------------------------------------+------- + SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 4 + SELECT pg_stat_monitor_reset() | 1 + SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C" | 1 (3 rows) SELECT pg_stat_monitor_reset(); @@ -69,8 +69,8 @@ SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; query | calls ---------------------------------------------------------------------------------------------------+------- SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 1000 - SELECT pg_stat_monitor_reset(); | 1 - SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 + SELECT pg_stat_monitor_reset() | 1 + SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C" | 1 do $$ +| 1 declare +| n integer:= 1; +| @@ -80,7 +80,7 @@ SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; exit when n = 1000; +| n := n + 1; +| end loop; +| - end $$; | + end $$ | (4 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/database.out b/regression/expected/database.out index 378cbea..12c5055 100644 --- a/regression/expected/database.out +++ b/regression/expected/database.out @@ -28,12 +28,12 @@ SELECT * FROM t3,t4 WHERE t3.c = t4.d; \c contrib_regression SELECT datname, query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - datname | query ---------------------+------------------------------------------------------------------------ - db1 | SELECT * FROM t1,t2 WHERE t1.a = t2.b; - db2 | SELECT * FROM t3,t4 WHERE t3.c = t4.d; - contrib_regression | SELECT datname, query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - contrib_regression | SELECT pg_stat_monitor_reset(); + datname | query +--------------------+----------------------------------------------------------------------- + db1 | SELECT * FROM t1,t2 WHERE t1.a = t2.b + db2 | SELECT * FROM t3,t4 WHERE t3.c = t4.d + contrib_regression | SELECT datname, query FROM pg_stat_monitor ORDER BY query COLLATE "C" + contrib_regression | SELECT pg_stat_monitor_reset() (4 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/error.out b/regression/expected/error.out index ccf7968..2e3d8fa 100644 --- a/regression/expected/error.out +++ b/regression/expected/error.out @@ -21,18 +21,22 @@ RAISE WARNING 'warning message'; END $$; WARNING: warning message SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; - query | elevel | sqlcode | message -------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 20 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | -(6 rows) + query | elevel | sqlcode | message 
+-----------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 20 | 22012 | division by zero + SELECT pg_stat_monitor_reset() | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel | 0 | | + do $$ +| 0 | | + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$ | | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | +(7 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset diff --git a/regression/expected/error_1.out b/regression/expected/error_1.out index 102a92a..7a61786 100644 --- a/regression/expected/error_1.out +++ b/regression/expected/error_1.out @@ -21,18 +21,22 @@ RAISE WARNING 'warning message'; END $$; WARNING: warning message SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; - query | elevel | sqlcode | message -------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 21 | 42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 21 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | -(6 rows) + query | elevel | sqlcode | message +-----------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 21 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 21 | 22012 | division by zero + SELECT pg_stat_monitor_reset() | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel | 0 | | + do $$ +| 0 | | + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$ | | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | +(7 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset diff --git a/regression/expected/error_insert.out b/regression/expected/error_insert.out new file mode 100644 index 0000000..19a1d89 --- /dev/null +++ b/regression/expected/error_insert.out @@ -0,0 +1,34 @@ +Drop Table if exists Company; +NOTICE: table "company" does not exist, skipping +CREATE TABLE Company( + ID INT PRIMARY KEY NOT NULL, + NAME TEXT NOT NULL +); +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +ERROR: duplicate key value violates unique constraint "company_pkey" +DETAIL: Key (id)=(1) already exists. 
+Drop Table if exists Company; +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +-------------------------------------------------------+--------+---------+--------------------------------------------------------------- + Drop Table if exists Company | 0 | | + INSERT INTO Company(ID, Name) VALUES ($1, $2) | 0 | | + INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); | 21 | 23505 | duplicate key value violates unique constraint "company_pkey" + SELECT pg_stat_monitor_reset() | 0 | | +(4 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/error_insert_1.out b/regression/expected/error_insert_1.out new file mode 100644 index 0000000..c706613 --- /dev/null +++ b/regression/expected/error_insert_1.out @@ -0,0 +1,34 @@ +Drop Table if exists Company; +NOTICE: table "company" does not exist, skipping +CREATE TABLE Company( + ID INT PRIMARY KEY NOT NULL, + NAME TEXT NOT NULL +); +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +ERROR: duplicate key value violates unique constraint "company_pkey" +DETAIL: Key (id)=(1) already exists. +Drop Table if exists Company; +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +-------------------------------------------------------+--------+---------+--------------------------------------------------------------- + Drop Table if exists Company | 0 | | + INSERT INTO Company(ID, Name) VALUES ($1, $2) | 0 | | + INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); | 20 | 23505 | duplicate key value violates unique constraint "company_pkey" + SELECT pg_stat_monitor_reset() | 0 | | +(4 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/histogram.out b/regression/expected/histogram.out index e69de29..40e7fe5 100644 --- a/regression/expected/histogram.out +++ b/regression/expected/histogram.out @@ -0,0 +1,71 @@ +CREATE OR REPLACE FUNCTION generate_histogram() + RETURNS TABLE ( + range TEXT, freq INT, bar TEXT + ) AS $$ +Declare + bucket_id integer; + query_id text; +BEGIN + select bucket into bucket_id from pg_stat_monitor order by calls desc limit 1; + select queryid into query_id from pg_stat_monitor order by calls desc limit 1; + --RAISE INFO 'bucket_id %', bucket_id; + --RAISE INFO 'query_id %', query_id; + return query + SELECT * FROM histogram(bucket_id, query_id) AS a(range TEXT, freq INT, bar TEXT); +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION run_pg_sleep(INTEGER) RETURNS VOID AS $$ +DECLARE + loops ALIAS FOR $1; +BEGIN + FOR i IN 1..loops LOOP + --RAISE INFO 'Current timestamp: %', timeofday()::TIMESTAMP; + RAISE INFO 'Sleep % seconds', i; + PERFORM pg_sleep(i); + END LOOP; +END; +$$ LANGUAGE 'plpgsql' STRICT; +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +Set pg_stat_monitor.track='all'; +select run_pg_sleep(5); +INFO: Sleep 1 seconds +INFO: Sleep 2 seconds +INFO: Sleep 3 seconds +INFO: Sleep 4 seconds +INFO: Sleep 5 seconds + run_pg_sleep +-------------- + +(1 row) + +SELECT 
substr(query, 0,50) as query, calls, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; + query | calls | resp_calls +---------------------------------+-------+----------------------- + SELECT pg_sleep(i) | 5 | {0,0,0,0,0,0,3,2,0,0} + SELECT pg_stat_monitor_reset() | 1 | {1,0,0,0,0,0,0,0,0,0} + Set pg_stat_monitor.track='all' | 1 | {1,0,0,0,0,0,0,0,0,0} + select run_pg_sleep($1) | 1 | {0,0,0,0,0,0,0,0,1,0} +(4 rows) + +select * from generate_histogram(); + range | freq | bar +--------------------+------+-------------------------------------------------------------------------------------------- + (0 - 3)} | 0 | + (3 - 10)} | 0 | + (10 - 31)} | 0 | + (31 - 100)} | 0 | + (100 - 316)} | 0 | + (316 - 1000)} | 0 | + (1000 - 3162)} | 3 | ■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ + (3162 - 10000)} | 2 | ■■■■■■■■■■■■■■■■■■■■ + (10000 - 31622)} | 0 | + (31622 - 100000)} | 0 | +(10 rows) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/histogram_1.out b/regression/expected/histogram_1.out new file mode 100644 index 0000000..f926990 --- /dev/null +++ b/regression/expected/histogram_1.out @@ -0,0 +1,71 @@ +CREATE OR REPLACE FUNCTION generate_histogram() + RETURNS TABLE ( + range TEXT, freq INT, bar TEXT + ) AS $$ +Declare + bucket_id integer; + query_id text; +BEGIN + select bucket into bucket_id from pg_stat_monitor order by calls desc limit 1; + select queryid into query_id from pg_stat_monitor order by calls desc limit 1; + --RAISE INFO 'bucket_id %', bucket_id; + --RAISE INFO 'query_id %', query_id; + return query + SELECT * FROM histogram(bucket_id, query_id) AS a(range TEXT, freq INT, bar TEXT); +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION run_pg_sleep(INTEGER) RETURNS VOID AS $$ +DECLARE + loops ALIAS FOR $1; +BEGIN + FOR i IN 1..loops LOOP + --RAISE INFO 'Current timestamp: %', timeofday()::TIMESTAMP; + RAISE INFO 'Sleep % seconds', i; + PERFORM pg_sleep(i); + END LOOP; +END; +$$ LANGUAGE 'plpgsql' STRICT; +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +Set pg_stat_monitor.track='all'; +select run_pg_sleep(5); +INFO: Sleep 1 seconds +INFO: Sleep 2 seconds +INFO: Sleep 3 seconds +INFO: Sleep 4 seconds +INFO: Sleep 5 seconds + run_pg_sleep +-------------- + +(1 row) + +SELECT substr(query, 0,50) as query, calls, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; + query | calls | resp_calls +---------------------------------+-------+----------------------- + SELECT pg_sleep(i) | 5 | {0,0,0,0,0,0,3,2,0,0} + SELECT pg_stat_monitor_reset() | 1 | {1,0,0,0,0,0,0,0,0,0} + Set pg_stat_monitor.track='all' | 1 | {1,0,0,0,0,0,0,0,0,0} + select run_pg_sleep($1) | 1 | {0,0,0,0,0,0,0,0,1,0} +(4 rows) + +select * from generate_histogram(); + range | freq | bar +--------------------+------+-------------------------------- + (0 - 3)} | 0 | + (3 - 10)} | 0 | + (10 - 31)} | 0 | + (31 - 100)} | 0 | + (100 - 316)} | 0 | + (316 - 1000)} | 0 | + (1000 - 3162)} | 3 | ■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ + (3162 - 10000)} | 2 | ■■■■■■■■■■■■■■■■■■■■ + (10000 - 31622)} | 0 | + (31622 - 100000)} | 0 | +(10 rows) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/relations.out b/regression/expected/relations.out index 9ff9288..6c17a47 100644 --- a/regression/expected/relations.out +++ b/regression/expected/relations.out @@ -37,14 +37,14 @@ SELECT * FROM foo1, foo2, foo3, foo4; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; - query | relations 
---------------------------------------------------------------------------+--------------------------------------------------- - SELECT * FROM foo1, foo2, foo3, foo4; | {public.foo1,public.foo2,public.foo3,public.foo4} - SELECT * FROM foo1, foo2, foo3; | {public.foo1,public.foo2,public.foo3} - SELECT * FROM foo1, foo2; | {public.foo1,public.foo2} - SELECT * FROM foo1; | {public.foo1} - SELECT pg_stat_monitor_reset(); | - SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; | {public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------------------+--------------------------------------------------- + SELECT * FROM foo1 | {public.foo1} + SELECT * FROM foo1, foo2 | {public.foo1,public.foo2} + SELECT * FROM foo1, foo2, foo3 | {public.foo1,public.foo2,public.foo3} + SELECT * FROM foo1, foo2, foo3, foo4 | {public.foo1,public.foo2,public.foo3,public.foo4} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C" | {public.pg_stat_monitor*,pg_catalog.pg_database} (6 rows) SELECT pg_stat_monitor_reset(); @@ -89,14 +89,14 @@ SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; - query | relations ---------------------------------------------------------------------------+-------------------------------------------------- - SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4; | {sch1.foo1,sch2.foo2,sch3.foo3,sch4.foo4} - SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3; | {sch1.foo1,sch2.foo2,sch3.foo3} - SELECT * FROM sch1.foo1, sch2.foo2; | {sch1.foo1,sch2.foo2} - SELECT * FROM sch1.foo1; | {sch1.foo1} - SELECT pg_stat_monitor_reset(); | - SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; | {public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------------------+-------------------------------------------------- + SELECT * FROM sch1.foo1 | {sch1.foo1} + SELECT * FROM sch1.foo1, sch2.foo2 | {sch1.foo1,sch2.foo2} + SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3 | {sch1.foo1,sch2.foo2,sch3.foo3} + SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4 | {sch1.foo1,sch2.foo2,sch3.foo3,sch4.foo4} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C" | {public.pg_stat_monitor*,pg_catalog.pg_database} (6 rows) SELECT pg_stat_monitor_reset(); @@ -122,12 +122,12 @@ SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query; - query | relations ---------------------------------------------------------------+-------------------------------------------------- - SELECT * FROM sch1.foo1, foo1; | {sch1.foo1,public.foo1} - SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2; | {sch1.foo1,sch2.foo2,public.foo1,public.foo2} - SELECT pg_stat_monitor_reset(); | - SELECT query, relations from pg_stat_monitor ORDER BY query; | {public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------+-------------------------------------------------- + SELECT * FROM sch1.foo1, foo1 | {sch1.foo1,public.foo1} + SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2 | {sch1.foo1,sch2.foo2,public.foo1,public.foo2} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from pg_stat_monitor ORDER BY query | 
{public.pg_stat_monitor*,pg_catalog.pg_database} (4 rows) SELECT pg_stat_monitor_reset(); @@ -168,14 +168,14 @@ SELECT * FROM v1,v2,v3,v4; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; - query | relations ---------------------------------------------------------------------------+----------------------------------------------------------------------------------------------- - SELECT * FROM v1,v2,v3,v4; | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3,public.v4*,public.foo4} - SELECT * FROM v1,v2,v3; | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3} - SELECT * FROM v1,v2; | {public.v1*,public.foo1,public.v2*,public.foo2} - SELECT * FROM v1; | {public.v1*,public.foo1} - SELECT pg_stat_monitor_reset(); | - SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; | {public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------------------+----------------------------------------------------------------------------------------------- + SELECT * FROM v1 | {public.v1*,public.foo1} + SELECT * FROM v1,v2 | {public.v1*,public.foo1,public.v2*,public.foo2} + SELECT * FROM v1,v2,v3 | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3} + SELECT * FROM v1,v2,v3,v4 | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3,public.v4*,public.foo4} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C" | {public.pg_stat_monitor*,pg_catalog.pg_database} (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/rows.out b/regression/expected/rows.out index 5a0a0df..c68a9d8 100644 --- a/regression/expected/rows.out +++ b/regression/expected/rows.out @@ -8541,14 +8541,14 @@ SELECt * FROM t2 WHERE b % 2 = 0; (2500 rows) SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | rows_retrieved --------------------------------------------------------------------------------+---------------- - SELECT * FROM t1 LIMIT $1 | 10 - SELECT * FROM t1; | 1000 - SELECT * FROM t2; | 5000 - SELECT pg_stat_monitor_reset(); | 1 - SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 - SELECt * FROM t2 WHERE b % $1 = $2 | 2500 + query | rows_retrieved +------------------------------------------------------------------------------+---------------- + SELECT * FROM t1 | 1000 + SELECT * FROM t1 LIMIT $1 | 10 + SELECT * FROM t2 | 5000 + SELECT pg_stat_monitor_reset() | 1 + SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C" | 0 + SELECt * FROM t2 WHERE b % $1 = $2 | 2500 (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/rows_1.out b/regression/expected/rows_1.out index d6b3c09..c43d41a 100644 --- a/regression/expected/rows_1.out +++ b/regression/expected/rows_1.out @@ -8541,14 +8541,14 @@ SELECt * FROM t2 WHERE b % 2 = 0; (2500 rows) SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | rows_retrieved --------------------------------------------------------------------------------+---------------- - SELECT * FROM t1 LIMIT $1 | 10 - SELECT * FROM t1; | 1000 - SELECT b FROM t2 FOR UPDATE; | 5000 - SELECT pg_stat_monitor_reset(); | 1 - SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 - SELECt * FROM t2 WHERE b % $1 = $2 | 2500 + query | rows_retrieved 
+------------------------------------------------------------------------------+----------------
+ SELECT * FROM t1 | 1000
+ SELECT * FROM t1 LIMIT $1 | 10
+ SELECT b FROM t2 FOR UPDATE | 5000
+ SELECT pg_stat_monitor_reset() | 1
+ SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C" | 0
+ SELECt * FROM t2 WHERE b % $1 = $2 | 2500
 (6 rows)
 SELECT pg_stat_monitor_reset();
diff --git a/regression/expected/state.out b/regression/expected/state.out
index b051091..bac22d1 100644
--- a/regression/expected/state.out
+++ b/regression/expected/state.out
@@ -14,12 +14,12 @@ SELECT 1;
 SELECT 1/0; -- divide by zero
 ERROR: division by zero
 SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C";
- query | state_code | state
----------------------------------------------------------------------------------+------------+---------------------
- SELECT $1 | 3 | FINISHED
- SELECT 1/0; | 4 | FINISHED WITH ERROR
- SELECT pg_stat_monitor_reset(); | 3 | FINISHED
- SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 2 | ACTIVE
+ query | state_code | state
+---------------------------------------------------------------------------------+------------+---------------------
+ SELECT $1 | 3 | FINISHED
+ SELECT 1/0; | 4 | FINISHED WITH ERROR
+ SELECT pg_stat_monitor_reset() | 3 | FINISHED
+ SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C" | 2 | ACTIVE
 (4 rows)
 SELECT pg_stat_monitor_reset();
diff --git a/regression/expected/tags.out b/regression/expected/tags.out
index b2cda90..27d37d3 100644
--- a/regression/expected/tags.out
+++ b/regression/expected/tags.out
@@ -15,8 +15,8 @@ SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C";
 query | comments
 ---------------------------------------------------------------------------+----------------------------------------------------------
 SELECT $1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */ | /* { "application", psql_app, "real_ip", 192.168.1.3) */
- SELECT pg_stat_monitor_reset(); |
- SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; |
+ SELECT pg_stat_monitor_reset() |
+ SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C" |
 (3 rows)
 SELECT pg_stat_monitor_reset();
diff --git a/regression/expected/top_query.out b/regression/expected/top_query.out
index 5d810f8..42d4f3b 100644
--- a/regression/expected/top_query.out
+++ b/regression/expected/top_query.out
@@ -23,23 +23,23 @@ SELECT add2(1,2);
 (1 row)
 SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C";
- query | top_query
---------------------------------------------------------------------------+--------------------
- CREATE OR REPLACE FUNCTION add(int, int) RETURNS INTEGER AS +|
- $$ +|
- BEGIN +|
- return (select $1 + $2); +|
- END; $$ language plpgsql; |
- CREATE OR REPLACE function add2(int, int) RETURNS int as +|
- $$ +|
- BEGIN +|
- return add($1,$2); +|
- END; +|
- $$ language plpgsql; |
- SELECT (select $1 + $2) | SELECT add2($1,$2)
- SELECT add2($1,$2) |
- SELECT pg_stat_monitor_reset(); |
- SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C"; |
+ query | top_query
+-------------------------------------------------------------------------+--------------------
+ CREATE OR REPLACE FUNCTION add(int, int) RETURNS INTEGER AS +|
+ $$ +|
+ BEGIN +|
+ return (select $1 + $2); +|
+ END; $$ language plpgsql |
+ CREATE OR REPLACE function add2(int, int) RETURNS int as +|
+ $$ +|
+ BEGIN +|
+ return add($1,$2); +|
+ END; +|
+ $$ language plpgsql |
+ SELECT (select $1 + $2) | SELECT add2($1,$2)
+ SELECT add2($1,$2) |
+ SELECT pg_stat_monitor_reset() |
+ SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C" |
 (6 rows)
 SELECT pg_stat_monitor_reset();
diff --git a/regression/expected/top_query_1.out b/regression/expected/top_query_1.out
index ca738e5..b8e71a8 100644
--- a/regression/expected/top_query_1.out
+++ b/regression/expected/top_query_1.out
@@ -23,23 +23,23 @@ SELECT add2(1,2);
 (1 row)
 SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C";
- query | top_query
---------------------------------------------------------------------------+--------------------
- (select $1 + $2) | SELECT add2($1,$2)
- CREATE OR REPLACE FUNCTION add(int, int) RETURNS INTEGER AS +|
- $$ +|
- BEGIN +|
- return (select $1 + $2); +|
- END; $$ language plpgsql; |
- CREATE OR REPLACE function add2(int, int) RETURNS int as +|
- $$ +|
- BEGIN +|
- return add($1,$2); +|
- END; +|
- $$ language plpgsql; |
- SELECT add2($1,$2) |
- SELECT pg_stat_monitor_reset(); |
- SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C"; |
+ query | top_query
+-------------------------------------------------------------------------+--------------------
+ (select $1 + $2) | SELECT add2($1,$2)
+ CREATE OR REPLACE FUNCTION add(int, int) RETURNS INTEGER AS +|
+ $$ +|
+ BEGIN +|
+ return (select $1 + $2); +|
+ END; $$ language plpgsql |
+ CREATE OR REPLACE function add2(int, int) RETURNS int as +|
+ $$ +|
+ BEGIN +|
+ return add($1,$2); +|
+ END; +|
+ $$ language plpgsql |
+ SELECT add2($1,$2) |
+ SELECT pg_stat_monitor_reset() |
+ SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C" |
 (6 rows)
 SELECT pg_stat_monitor_reset();
diff --git a/regression/sql/application_name_unique.sql b/regression/sql/application_name_unique.sql
new file mode 100644
index 0000000..49a1d55
--- /dev/null
+++ b/regression/sql/application_name_unique.sql
@@ -0,0 +1,9 @@
+Create EXTENSION pg_stat_monitor;
+SELECT pg_stat_monitor_reset();
+Set application_name = 'naeem' ;
+SELECT 1 AS num;
+Set application_name = 'psql' ;
+SELECT 1 AS num;
+SELECT query,application_name FROM pg_stat_monitor ORDER BY query, application_name COLLATE "C";
+SELECT pg_stat_monitor_reset();
+DROP EXTENSION pg_stat_monitor;
diff --git a/regression/sql/error_insert.sql b/regression/sql/error_insert.sql
new file mode 100644
index 0000000..14f21f1
--- /dev/null
+++ b/regression/sql/error_insert.sql
@@ -0,0 +1,17 @@
+Drop Table if exists Company;
+
+CREATE TABLE Company(
+ ID INT PRIMARY KEY NOT NULL,
+ NAME TEXT NOT NULL
+);
+
+
+CREATE EXTENSION pg_stat_monitor;
+SELECT pg_stat_monitor_reset();
+INSERT INTO Company(ID, Name) VALUES (1, 'Percona');
+INSERT INTO Company(ID, Name) VALUES (1, 'Percona');
+
+Drop Table if exists Company;
+SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel;
+SELECT pg_stat_monitor_reset();
+DROP EXTENSION pg_stat_monitor;
diff --git a/regression/sql/histogram.sql b/regression/sql/histogram.sql
index 50aa897..92960df 100644
--- a/regression/sql/histogram.sql
+++ b/regression/sql/histogram.sql
@@ -1,28 +1,39 @@
+CREATE OR REPLACE FUNCTION generate_histogram()
+ RETURNS TABLE (
+ range TEXT, freq INT, bar TEXT
+ ) AS $$
+Declare
+ bucket_id integer;
+ query_id text;
+BEGIN
+ select bucket into bucket_id from pg_stat_monitor order by calls desc limit 1;
+ select queryid into query_id from pg_stat_monitor order by calls desc limit 1;
+ --RAISE INFO 'bucket_id %', bucket_id;
+ --RAISE INFO 'query_id %', query_id;
+ return query
+ SELECT * FROM histogram(bucket_id, query_id) AS a(range TEXT, freq INT, bar TEXT);
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION run_pg_sleep(INTEGER) RETURNS VOID AS $$
+DECLARE
+ loops ALIAS FOR $1;
+BEGIN
+ FOR i IN 1..loops LOOP
+ --RAISE INFO 'Current timestamp: %', timeofday()::TIMESTAMP;
+ RAISE INFO 'Sleep % seconds', i;
+ PERFORM pg_sleep(i);
+ END LOOP;
+END;
+$$ LANGUAGE 'plpgsql' STRICT;
+
 CREATE EXTENSION pg_stat_monitor;
-
-CREATE TABLE t1(a int);
-
 SELECT pg_stat_monitor_reset();
+Set pg_stat_monitor.track='all';
+select run_pg_sleep(5);
-INSERT INTO t1 VALUES(generate_series(1,10));
-ANALYZE t1;
-SELECT count(*) FROM t1;
+SELECT substr(query, 0,50) as query, calls, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C";
-INSERT INTO t1 VALUES(generate_series(1,10000));
-ANALYZE t1;
-SELECT count(*) FROM t1;;
+select * from generate_histogram();
-INSERT INTO t1 VALUES(generate_series(1,1000000));
-ANALYZE t1;
-SELECT count(*) FROM t1;
-
-INSERT INTO t1 VALUES(generate_series(1,10000000));
-ANALYZE t1;
-SELECT count(*) FROM t1;
-
-SELECT query, calls, min_time, max_time, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C";
-SELECT * FROM histogram(0, 'F44CD1B4B33A47AF') AS a(range TEXT, freq INT, bar TEXT);
-
-DROP TABLE t1;
-SELECT pg_stat_monitor_reset();
 DROP EXTENSION pg_stat_monitor;
diff --git a/percona-packaging/rpm/pg-stat-monitor.spec b/rpm/pg-stat-monitor.spec
similarity index 95%
rename from percona-packaging/rpm/pg-stat-monitor.spec
rename to rpm/pg-stat-monitor.spec
index e59ab61..e4c852c 100644
--- a/percona-packaging/rpm/pg-stat-monitor.spec
+++ b/rpm/pg-stat-monitor.spec
@@ -13,6 +13,8 @@ URL: https://github.com/Percona-Lab/pg_stat_monitor
 BuildRequires: percona-postgresql%{pgrel}-devel
 Requires: postgresql-server
 Provides: percona-pg-stat-monitor%{pgrel}
+Conflicts: percona-pg-stat-monitor%{pgrel}
+Obsoletes: percona-pg-stat-monitor%{pgrel}
 Epoch: 1
 Packager: Percona Development Team
 Vendor: Percona, Inc