From e9ee3b8107335fa4184272ddfd6290bb21e3f28d Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Tue, 30 Nov 2021 14:03:13 +0200 Subject: [PATCH 01/39] DISTPG-348-pg_stat_monitor-Update-directory-structure-for-Debian-packaging-files --- {percona-packaging/debian => debian}/changelog | 0 {percona-packaging/debian => debian}/compat | 0 {percona-packaging/debian => debian}/control | 0 {percona-packaging/debian => debian}/control.in | 0 {percona-packaging/debian => debian}/copyright | 0 {percona-packaging/debian => debian}/pgversions | 0 {percona-packaging/debian => debian}/rules | 0 {percona-packaging/debian => debian}/source/format | 0 {percona-packaging/debian => debian}/source/lintian-overrides | 0 {percona-packaging/rpm => rpm}/pg-stat-monitor.spec | 0 10 files changed, 0 insertions(+), 0 deletions(-) rename {percona-packaging/debian => debian}/changelog (100%) rename {percona-packaging/debian => debian}/compat (100%) rename {percona-packaging/debian => debian}/control (100%) rename {percona-packaging/debian => debian}/control.in (100%) rename {percona-packaging/debian => debian}/copyright (100%) rename {percona-packaging/debian => debian}/pgversions (100%) rename {percona-packaging/debian => debian}/rules (100%) rename {percona-packaging/debian => debian}/source/format (100%) rename {percona-packaging/debian => debian}/source/lintian-overrides (100%) rename {percona-packaging/rpm => rpm}/pg-stat-monitor.spec (100%) diff --git a/percona-packaging/debian/changelog b/debian/changelog similarity index 100% rename from percona-packaging/debian/changelog rename to debian/changelog diff --git a/percona-packaging/debian/compat b/debian/compat similarity index 100% rename from percona-packaging/debian/compat rename to debian/compat diff --git a/percona-packaging/debian/control b/debian/control similarity index 100% rename from percona-packaging/debian/control rename to debian/control diff --git a/percona-packaging/debian/control.in b/debian/control.in similarity index 
100% rename from percona-packaging/debian/control.in rename to debian/control.in diff --git a/percona-packaging/debian/copyright b/debian/copyright similarity index 100% rename from percona-packaging/debian/copyright rename to debian/copyright diff --git a/percona-packaging/debian/pgversions b/debian/pgversions similarity index 100% rename from percona-packaging/debian/pgversions rename to debian/pgversions diff --git a/percona-packaging/debian/rules b/debian/rules similarity index 100% rename from percona-packaging/debian/rules rename to debian/rules diff --git a/percona-packaging/debian/source/format b/debian/source/format similarity index 100% rename from percona-packaging/debian/source/format rename to debian/source/format diff --git a/percona-packaging/debian/source/lintian-overrides b/debian/source/lintian-overrides similarity index 100% rename from percona-packaging/debian/source/lintian-overrides rename to debian/source/lintian-overrides diff --git a/percona-packaging/rpm/pg-stat-monitor.spec b/rpm/pg-stat-monitor.spec similarity index 100% rename from percona-packaging/rpm/pg-stat-monitor.spec rename to rpm/pg-stat-monitor.spec From 77f2facf6452a71c94190d33106732afc6022806 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Tue, 30 Nov 2021 14:10:37 +0200 Subject: [PATCH 02/39] DISTPG-349 PG Debian Requirement: Remove dependency on Percona PostgreSQL --- debian/control.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control.in b/debian/control.in index 662d898..f1a620f 100644 --- a/debian/control.in +++ b/debian/control.in @@ -4,12 +4,12 @@ Priority: optional Maintainer: Percona Development Team Build-Depends: debhelper (>= 9), - percona-postgresql-server-dev-all (>= 153~), + postgresql-server-dev-all (>= 153~), Package: percona-pg-stat-monitor@@PG_REL@@ Architecture: any Depends: - percona-postgresql-@@PG_REL@@ | postgresql-@@PG_REL@@, + postgresql-@@PG_REL@@, ${misc:Depends}, ${shlibs:Depends}, Description: The 
pg_stat_monitor is statistics collector tool From 45ad5e9d1a7e80f4552c87b01712115b56e64119 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Tue, 30 Nov 2021 14:38:45 +0200 Subject: [PATCH 03/39] DISTPG-353 PG Debian Requirement: the Description is overflowing --- debian/control | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/debian/control b/debian/control index 662d898..8eb2b20 100644 --- a/debian/control +++ b/debian/control @@ -12,9 +12,13 @@ Depends: percona-postgresql-@@PG_REL@@ | postgresql-@@PG_REL@@, ${misc:Depends}, ${shlibs:Depends}, -Description: The pg_stat_monitor is statistics collector tool - based on PostgreSQL's contrib module "pg_stat_statements". +Description: enhancement query planning and execution statistics collector + The pg_stat_monitor is a Query Performance Monitoring tool for PostgreSQL. + It attempts to provide a more holistic picture by providing much-needed query + performance insights in a single view. . - pg_stat_monitor is developed on the basis of pg_stat_statments - as more advanced replacement for pg_stat_statment. - It provides all the features of pg_stat_statment plus its own feature set. + pg_stat_monitor provides improved insights that allow database users to + understand query origins, execution, planning statistics and details, query + information, and metadata. This significantly improves observability, enabling + users to debug and tune query performance. pg_stat_monitor is developed on the + basis of pg_stat_statements as its more advanced replacement. 
From 7756a8c0cbb46eb1c7062b42062f0dbf1fedd73e Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Tue, 30 Nov 2021 15:13:04 +0200 Subject: [PATCH 04/39] DISTPG-349 modified build script --- percona-packaging/scripts/pg_stat_monitor_builder.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/percona-packaging/scripts/pg_stat_monitor_builder.sh b/percona-packaging/scripts/pg_stat_monitor_builder.sh index 52b2ccb..bab9be0 100644 --- a/percona-packaging/scripts/pg_stat_monitor_builder.sh +++ b/percona-packaging/scripts/pg_stat_monitor_builder.sh @@ -128,9 +128,6 @@ get_sources(){ fi REVISION=$(git rev-parse --short HEAD) echo "REVISION=${REVISION}" >> ${WORKDIR}/pg-stat-monitor.properties - rm -rf rpm debian - cp -r percona-packaging/rpm ./ - cp -r percona-packaging/debian ./ EDITFILES="debian/control debian/control.in debian/rules rpm/pg-stat-monitor.spec" for file in $EDITFILES; do From 87ebfd4f79059dcfdc4995b20eac55a8a5d29051 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Tue, 30 Nov 2021 21:04:43 +0200 Subject: [PATCH 05/39] DISTPG-349 modified build script --- percona-packaging/scripts/pg_stat_monitor_builder.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/percona-packaging/scripts/pg_stat_monitor_builder.sh b/percona-packaging/scripts/pg_stat_monitor_builder.sh index bab9be0..e903448 100644 --- a/percona-packaging/scripts/pg_stat_monitor_builder.sh +++ b/percona-packaging/scripts/pg_stat_monitor_builder.sh @@ -222,6 +222,10 @@ install_deps() { elif [[ "${PG_RELEASE}" == "12" ]]; then percona-release enable ppg-12 release fi + + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + echo "deb http://apt.postgresql.org/pub/repos/apt/ ${PG_RELEASE}"-pgdg main | sudo tee /etc/apt/sources.list.d/pgdg.list + apt-get update if [[ "${OS_NAME}" != "focal" ]]; then @@ -234,7 +238,7 @@ install_deps() { fi fi - PKGLIST+=" percona-postgresql-${PG_RELEASE} debconf debhelper clang-7 devscripts dh-exec 
dh-systemd git wget libkrb5-dev libssl-dev percona-postgresql-common percona-postgresql-server-dev-all" + PKGLIST+=" postgresql-${PG_RELEASE} debconf debhelper clang-7 devscripts dh-exec dh-systemd git wget libkrb5-dev libssl-dev postgresql-common postgresql-server-dev-all" PKGLIST+=" build-essential debconf debhelper devscripts dh-exec dh-systemd git wget libxml-checker-perl libxml-libxml-perl libio-socket-ssl-perl libperl-dev libssl-dev libxml2-dev txt2man zlib1g-dev libpq-dev" until DEBIAN_FRONTEND=noninteractive apt-get -y install ${PKGLIST}; do @@ -399,6 +403,7 @@ build_source_deb(){ cd ${BUILDDIR} dch -D unstable --force-distribution -v "${VERSION}-${DEB_RELEASE}" "Update to new percona-pg-stat-monitor${PG_RELEASE} version ${VERSION}" + pg_buildext updatecontrol dpkg-buildpackage -S cd ../ mkdir -p $WORKDIR/source_deb From d8ca6ad50a28e5c0ab0bc1d8f5dd167d1e7bad62 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Tue, 30 Nov 2021 21:39:52 +0200 Subject: [PATCH 06/39] DISTPG-349 modified build script --- debian/control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control b/debian/control index 8eb2b20..cd75130 100644 --- a/debian/control +++ b/debian/control @@ -4,12 +4,12 @@ Priority: optional Maintainer: Percona Development Team Build-Depends: debhelper (>= 9), - percona-postgresql-server-dev-all (>= 153~), + postgresql-server-dev-all (>= 153~), Package: percona-pg-stat-monitor@@PG_REL@@ Architecture: any Depends: - percona-postgresql-@@PG_REL@@ | postgresql-@@PG_REL@@, + postgresql-@@PG_REL@@ | postgresql-@@PG_REL@@, ${misc:Depends}, ${shlibs:Depends}, Description: enhancement query planning and execution statistics collector From 2b55326bda492c16afbd4b5d4d8c16ddda4a1334 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Tue, 30 Nov 2021 21:43:54 +0200 Subject: [PATCH 07/39] DISTPG-349 modified build script --- debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control b/debian/control 
index cd75130..8e77a49 100644 --- a/debian/control +++ b/debian/control @@ -9,7 +9,7 @@ Build-Depends: Package: percona-pg-stat-monitor@@PG_REL@@ Architecture: any Depends: - postgresql-@@PG_REL@@ | postgresql-@@PG_REL@@, + postgresql-@@PG_REL@@, ${misc:Depends}, ${shlibs:Depends}, Description: enhancement query planning and execution statistics collector From a939265018f403890ec53123825477c4c3d62c08 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Wed, 1 Dec 2021 12:12:39 +0200 Subject: [PATCH 08/39] DISTPG-349 modified build script --- debian/control | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/debian/control b/debian/control index 8e77a49..18a694e 100644 --- a/debian/control +++ b/debian/control @@ -17,8 +17,7 @@ Description: enhancement query planning and execution statistics collector It attempts to provide a more holistic picture by providing much-needed query performance insights in a single view. . - pg_stat_monitor provides improved insights that allow database users to - understand query origins, execution, planning statistics and details, query - information, and metadata. This significantly improves observability, enabling - users to debug and tune query performance. pg_stat_monitor is developed on the - basis of pg_stat_statements as its more advanced replacement. + pg_stat_monitor is developed on the basis of pg_stat_statments + as more advanced replacement for pg_stat_statment. + It provides all the features of pg_stat_statment plus its own feature set. 
+ From da73cfb70b1a57ae73fc7b018915d45da5a24fed Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Wed, 1 Dec 2021 19:22:29 +0200 Subject: [PATCH 09/39] DISTPG-349 modified build script --- debian/control | 1 - percona-packaging/scripts/pg_stat_monitor_builder.sh | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control b/debian/control index 18a694e..58ef967 100644 --- a/debian/control +++ b/debian/control @@ -20,4 +20,3 @@ Description: enhancement query planning and execution statistics collector pg_stat_monitor is developed on the basis of pg_stat_statments as more advanced replacement for pg_stat_statment. It provides all the features of pg_stat_statment plus its own feature set. - diff --git a/percona-packaging/scripts/pg_stat_monitor_builder.sh b/percona-packaging/scripts/pg_stat_monitor_builder.sh index e903448..1152837 100644 --- a/percona-packaging/scripts/pg_stat_monitor_builder.sh +++ b/percona-packaging/scripts/pg_stat_monitor_builder.sh @@ -446,6 +446,7 @@ build_deb(){ dpkg-source -x ${DSC} # cd percona-pg-stat-monitor-${VERSION} + sed -i "s:\. 
:${WORKDIR}/percona-pg-stat-monitor-${VERSION} :g" debian/rules dch -m -D "${OS_NAME}" --force-distribution -v "1:${VERSION}-${DEB_RELEASE}.${OS_NAME}" 'Update distribution' unset $(locale|cut -d= -f1) dpkg-buildpackage -rfakeroot -us -uc -b From 2a6cf2a3545a4f5e634d7cd7c7d630da388398f0 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Wed, 1 Dec 2021 21:30:14 +0200 Subject: [PATCH 10/39] DISTPG-349 modified build script --- Makefile | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index ac1225d..02cdb99 100644 --- a/Makefile +++ b/Makefile @@ -23,19 +23,39 @@ PG_VERSION := $(shell pg_config --version | awk {'print $$1 $$2'}) MAJOR := $(shell echo $(PG_VERSION) | sed -e 's/\.[^./]*$$//') ifneq (,$(findstring PostgreSQL14,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.14.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.14.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.14.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.14.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.14.sql.in pg_stat_monitor--1.0.sql) + endif endif ifneq (,$(findstring PostgreSQL13,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.13.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.13.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.13.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.13.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.13.sql.in pg_stat_monitor--1.0.sql) + endif endif ifneq (,$(findstring PostgreSQL12,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + endif endif ifneq 
(,$(findstring PostgreSQL11,$(MAJOR))) - CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + ifneq (,$(wildcard ../pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp ../pg_stat_monitor--1.0.sql.in ../pg_stat_monitor--1.0.sql) + endif + ifneq (,$(wildcard pg_stat_monitor--1.0.sql.in)) + CP := $(shell cp pg_stat_monitor--1.0.sql.in pg_stat_monitor--1.0.sql) + endif endif ifdef USE_PGXS From 6990508d8ad64f39bf5505fe1d0bf6537dba0095 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Wed, 1 Dec 2021 21:43:41 +0200 Subject: [PATCH 11/39] DISTPG-349 Modify Description --- debian/control | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/debian/control b/debian/control index 58ef967..8e77a49 100644 --- a/debian/control +++ b/debian/control @@ -17,6 +17,8 @@ Description: enhancement query planning and execution statistics collector It attempts to provide a more holistic picture by providing much-needed query performance insights in a single view. . - pg_stat_monitor is developed on the basis of pg_stat_statments - as more advanced replacement for pg_stat_statment. - It provides all the features of pg_stat_statment plus its own feature set. + pg_stat_monitor provides improved insights that allow database users to + understand query origins, execution, planning statistics and details, query + information, and metadata. This significantly improves observability, enabling + users to debug and tune query performance. pg_stat_monitor is developed on the + basis of pg_stat_statements as its more advanced replacement. 
From f1041ed0537a48b11233f7f99215e77e0ef7657d Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Thu, 2 Dec 2021 12:54:08 +0200 Subject: [PATCH 12/39] DISTPG-349 Change Copyright --- debian/copyright | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/debian/copyright b/debian/copyright index 621a1ee..dfbbbf6 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,27 +1,26 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: PostgreSQL +Upstream-Name: pg_stat_monitor Source: https://github.com/percona/pg_stat_monitor -pg_stat_monitor - Statistics collector for PostgreSQL. +Files: * +Copyright: Portions Copyright © 2018-2021, Percona LLC and/or its affiliates + Portions Copyright © 1996-2021, The PostgreSQL Global Development Group + Portions Copyright © 1994, The Regents of the University of California -Portions Copyright © 2018-2020, Percona LLC and/or its affiliates - -Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group - -Portions Copyright (c) 1994, The Regents of the University of California - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose, without fee, and without a written agreement is -hereby granted, provided that the above copyright notice and this paragraph and -the following two paragraphs appear in all copies. - -IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR DIRECT, -INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, -ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE -COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND -THE COPYRIGHT HOLDER HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, -UPDATES, ENHANCEMENTS, OR MODIFICATIONS. +License: PostgreSQL + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose, without fee, and without a written agreement + is hereby granted, provided that the above copyright notice and this + paragraph and the following two paragraphs appear in all copies. + . + IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR + DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING + LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS + DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + . + THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO + PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
From 7decd18e03e5fb82b262184b72d71e4ff07579f8 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Thu, 2 Dec 2021 19:02:41 +0200 Subject: [PATCH 13/39] DISTPG-349 modify control.in --- debian/control.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control.in b/debian/control.in index f1a620f..66d837c 100644 --- a/debian/control.in +++ b/debian/control.in @@ -6,10 +6,10 @@ Build-Depends: debhelper (>= 9), postgresql-server-dev-all (>= 153~), -Package: percona-pg-stat-monitor@@PG_REL@@ +Package: percona-pg-stat-monitorPGVERSION Architecture: any Depends: - postgresql-@@PG_REL@@, + postgresql-PGVERSION, ${misc:Depends}, ${shlibs:Depends}, Description: The pg_stat_monitor is statistics collector tool From f65dea874c5f2cdac079ea1260ddbdfe167880f7 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Fri, 3 Dec 2021 10:45:38 +0200 Subject: [PATCH 14/39] DISTPG-349 modify control.in --- debian/control.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control.in b/debian/control.in index 66d837c..66a62c4 100644 --- a/debian/control.in +++ b/debian/control.in @@ -4,7 +4,7 @@ Priority: optional Maintainer: Percona Development Team Build-Depends: debhelper (>= 9), - postgresql-server-dev-all (>= 153~), + postgresql-server-dev-all (>= 153~) | percona-postgresql-server-dev-all (>= 153~), Package: percona-pg-stat-monitorPGVERSION Architecture: any From b636098fbc060fab10a3fc56974dbb9f21871e26 Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Mon, 6 Dec 2021 10:06:55 +0200 Subject: [PATCH 15/39] DISTPG-349 modify build script --- .../scripts/pg_stat_monitor_builder.sh | 22 ++++++++++++++----- rpm/pg-stat-monitor.spec | 2 +- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/percona-packaging/scripts/pg_stat_monitor_builder.sh b/percona-packaging/scripts/pg_stat_monitor_builder.sh index 1152837..77b8db3 100644 --- a/percona-packaging/scripts/pg_stat_monitor_builder.sh +++ 
b/percona-packaging/scripts/pg_stat_monitor_builder.sh @@ -21,6 +21,7 @@ Usage: $0 [OPTIONS] --rpm_release RPM version( default = 1) --deb_release DEB version( default = 1) --pg_release PPG version build on( default = 11) + --pg_version product version community or percona ( default = percona ) --version product version --help) usage ;; Example $0 --builddir=/tmp/test --get_sources=1 --build_src_rpm=1 --build_rpm=1 @@ -57,6 +58,7 @@ append_arg_to_args () { --rpm_release=*) RPM_RELEASE="$val" ;; --deb_release=*) DEB_RELEASE="$val" ;; --pg_release=*) PG_RELEASE="$val" ;; + --pg_version=*) PG_VERSION="$val" ;; --version=*) VERSION="$val" ;; --help) usage ;; *) @@ -110,6 +112,7 @@ get_sources(){ echo "BUILD_ID=${BUILD_ID}" >> pg-stat-monitor.properties echo "BRANCH_NAME=$(echo ${BRANCH} | awk -F '/' '{print $(NF)}')" >> pg-stat-monitor.properties echo "PG_RELEASE=${PG_RELEASE}" >> pg-stat-monitor.properties + echo "PG_VERSION=${PG_VERSION}" >> pg-stat-monitor.properties echo "RPM_RELEASE=${RPM_RELEASE}" >> pg-stat-monitor.properties echo "DEB_RELEASE=${DEB_RELEASE}" >> pg-stat-monitor.properties git clone "$REPO" ${PRODUCT_FULL} @@ -190,9 +193,10 @@ install_deps() { percona-release enable ppg-12 release fi yum -y install git wget + PKGLIST="percona-postgresql-common percona-postgresql${PG_RELEASE}-devel" PKGLIST+=" clang-devel git clang llvm-devel rpmdevtools vim wget" PKGLIST+=" perl binutils gcc gcc-c++" - PKGLIST+=" percona-postgresql-common clang-devel llvm-devel percona-postgresql${PG_RELEASE}-devel git rpm-build rpmdevtools wget gcc make autoconf" + PKGLIST+=" clang-devel llvm-devel git rpm-build rpmdevtools wget gcc make autoconf" if [[ "${RHEL}" -eq 8 ]]; then dnf -y module disable postgresql elif [[ "${RHEL}" -eq 7 ]]; then @@ -223,8 +227,15 @@ install_deps() { percona-release enable ppg-12 release fi - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - echo "deb http://apt.postgresql.org/pub/repos/apt/ 
${PG_RELEASE}"-pgdg main | sudo tee /etc/apt/sources.list.d/pgdg.list + + PKGLIST="percona-postgresql-${PG_RELEASE} percona-postgresql-common percona-postgresql-server-dev-all" + + if [[ "${PG_VERSION}" == "community" ]]; then + # ---- using a community version of postgresql + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + echo "deb http://apt.postgresql.org/pub/repos/apt/ ${PG_RELEASE}"-pgdg main | sudo tee /etc/apt/sources.list.d/pgdg.list + PKGLIST="postgresql-${PG_RELEASE} postgresql-common postgresql-server-dev-all" + fi apt-get update @@ -238,8 +249,9 @@ install_deps() { fi fi - PKGLIST+=" postgresql-${PG_RELEASE} debconf debhelper clang-7 devscripts dh-exec dh-systemd git wget libkrb5-dev libssl-dev postgresql-common postgresql-server-dev-all" - PKGLIST+=" build-essential debconf debhelper devscripts dh-exec dh-systemd git wget libxml-checker-perl libxml-libxml-perl libio-socket-ssl-perl libperl-dev libssl-dev libxml2-dev txt2man zlib1g-dev libpq-dev" + PKGLIST+=" debconf debhelper clang-7 devscripts dh-exec dh-systemd git wget libkrb5-dev libssl-dev" + PKGLIST+=" build-essential debconf debhelper devscripts dh-exec dh-systemd git wget libxml-checker-perl" + PKGLIST+=" libxml-libxml-perl libio-socket-ssl-perl libperl-dev libssl-dev libxml2-dev txt2man zlib1g-dev libpq-dev" until DEBIAN_FRONTEND=noninteractive apt-get -y install ${PKGLIST}; do sleep 1 diff --git a/rpm/pg-stat-monitor.spec b/rpm/pg-stat-monitor.spec index e59ab61..59fa034 100644 --- a/rpm/pg-stat-monitor.spec +++ b/rpm/pg-stat-monitor.spec @@ -10,7 +10,7 @@ Release: %{rpm_release}%{?dist} License: PostgreSQL Source0: percona-pg-stat-monitor%{pgrel}-%{version}.tar.gz URL: https://github.com/Percona-Lab/pg_stat_monitor -BuildRequires: percona-postgresql%{pgrel}-devel +BuildRequires: postgresql%{pgrel}-devel or percona-postgresql%{pgrel}-devel Requires: postgresql-server Provides: percona-pg-stat-monitor%{pgrel} Epoch: 1 From 
ff4a6725570cdecdfce2193a2fffb01346108e3b Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Mon, 6 Dec 2021 11:55:21 +0200 Subject: [PATCH 16/39] DISTPG-349 modify build script --- .../scripts/pg_stat_monitor_builder.sh | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/percona-packaging/scripts/pg_stat_monitor_builder.sh b/percona-packaging/scripts/pg_stat_monitor_builder.sh index 77b8db3..92986dc 100644 --- a/percona-packaging/scripts/pg_stat_monitor_builder.sh +++ b/percona-packaging/scripts/pg_stat_monitor_builder.sh @@ -21,7 +21,6 @@ Usage: $0 [OPTIONS] --rpm_release RPM version( default = 1) --deb_release DEB version( default = 1) --pg_release PPG version build on( default = 11) - --pg_version product version community or percona ( default = percona ) --version product version --help) usage ;; Example $0 --builddir=/tmp/test --get_sources=1 --build_src_rpm=1 --build_rpm=1 @@ -58,7 +57,6 @@ append_arg_to_args () { --rpm_release=*) RPM_RELEASE="$val" ;; --deb_release=*) DEB_RELEASE="$val" ;; --pg_release=*) PG_RELEASE="$val" ;; - --pg_version=*) PG_VERSION="$val" ;; --version=*) VERSION="$val" ;; --help) usage ;; *) @@ -112,7 +110,6 @@ get_sources(){ echo "BUILD_ID=${BUILD_ID}" >> pg-stat-monitor.properties echo "BRANCH_NAME=$(echo ${BRANCH} | awk -F '/' '{print $(NF)}')" >> pg-stat-monitor.properties echo "PG_RELEASE=${PG_RELEASE}" >> pg-stat-monitor.properties - echo "PG_VERSION=${PG_VERSION}" >> pg-stat-monitor.properties echo "RPM_RELEASE=${RPM_RELEASE}" >> pg-stat-monitor.properties echo "DEB_RELEASE=${DEB_RELEASE}" >> pg-stat-monitor.properties git clone "$REPO" ${PRODUCT_FULL} @@ -230,12 +227,10 @@ install_deps() { PKGLIST="percona-postgresql-${PG_RELEASE} percona-postgresql-common percona-postgresql-server-dev-all" - if [[ "${PG_VERSION}" == "community" ]]; then - # ---- using a community version of postgresql - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - echo "deb 
http://apt.postgresql.org/pub/repos/apt/ ${PG_RELEASE}"-pgdg main | sudo tee /etc/apt/sources.list.d/pgdg.list - PKGLIST="postgresql-${PG_RELEASE} postgresql-common postgresql-server-dev-all" - fi + # ---- using a community version of postgresql + #wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + #echo "deb http://apt.postgresql.org/pub/repos/apt/ ${PG_RELEASE}"-pgdg main | sudo tee /etc/apt/sources.list.d/pgdg.list + #PKGLIST="postgresql-${PG_RELEASE} postgresql-common postgresql-server-dev-all" apt-get update From e39140544434edadacd3ce31deb06cf6a3e647ea Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Mon, 6 Dec 2021 12:33:06 +0200 Subject: [PATCH 17/39] DISTPG-349 restored spec file --- rpm/pg-stat-monitor.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpm/pg-stat-monitor.spec b/rpm/pg-stat-monitor.spec index 59fa034..e59ab61 100644 --- a/rpm/pg-stat-monitor.spec +++ b/rpm/pg-stat-monitor.spec @@ -10,7 +10,7 @@ Release: %{rpm_release}%{?dist} License: PostgreSQL Source0: percona-pg-stat-monitor%{pgrel}-%{version}.tar.gz URL: https://github.com/Percona-Lab/pg_stat_monitor -BuildRequires: postgresql%{pgrel}-devel or percona-postgresql%{pgrel}-devel +BuildRequires: percona-postgresql%{pgrel}-devel Requires: postgresql-server Provides: percona-pg-stat-monitor%{pgrel} Epoch: 1 From 07438dfee3ef036a5d9f85b1cd26c1f825f52f7c Mon Sep 17 00:00:00 2001 From: Evgeniy Patlan Date: Mon, 6 Dec 2021 14:43:51 +0200 Subject: [PATCH 18/39] Fix rpm packaging --- rpm/pg-stat-monitor.spec | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rpm/pg-stat-monitor.spec b/rpm/pg-stat-monitor.spec index e59ab61..e4c852c 100644 --- a/rpm/pg-stat-monitor.spec +++ b/rpm/pg-stat-monitor.spec @@ -13,6 +13,8 @@ URL: https://github.com/Percona-Lab/pg_stat_monitor BuildRequires: percona-postgresql%{pgrel}-devel Requires: postgresql-server Provides: percona-pg-stat-monitor%{pgrel} +Conflicts: percona-pg-stat-monitor%{pgrel} 
+Obsoletes: percona-pg-stat-monitor%{pgrel} Epoch: 1 Packager: Percona Development Team Vendor: Percona, Inc From 7d92b3ac59111933acaabec6a8c69269d08fd824 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Tue, 30 Nov 2021 16:15:53 -0300 Subject: [PATCH 19/39] PG-290: Fix crash when enabling debugging log level on PostgreSQL. There were couple issues to handle, the main one was that our log hook (pgsm_emit_log_hook) was being called after the shared memory hook completed (pgss_shmem_startup) but before PostgreSQL boostraping code finished, thus triggering the following assertion during a call to LWLockAcquire(): Assert(!(proc == NULL && IsUnderPostmaster)); proc is a pointer to MyProc, a PostgreSQL's shared global variable that was not yet initalized by PostgreSQL. We must also check for a NULL pointer return in pg_get_backend_status() the pgstat_fetch_stat_local_beentry() function may return a NULL pointer during initialization, in which case we use "127.0.0.1" for the client address, and "postmaster" for application name. 
--- pg_stat_monitor.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 4042de1..ec43578 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1123,6 +1123,9 @@ pg_get_backend_status(void) PgBackendStatus *beentry; local_beentry = pgstat_fetch_stat_local_beentry(i); + if (!local_beentry) + continue; + beentry = &local_beentry->backendStatus; if (beentry->st_procpid == MyProcPid) @@ -1135,6 +1138,8 @@ static int pg_get_application_name(char *application_name) { PgBackendStatus *beentry = pg_get_backend_status(); + if (!beentry) + return snprintf(application_name, APPLICATIONNAME_LEN, "%s", "postmaster"); snprintf(application_name, APPLICATIONNAME_LEN, "%s", beentry->st_appname); return strlen(application_name); @@ -1157,6 +1162,9 @@ pg_get_client_addr(void) char remote_host[NI_MAXHOST]; int ret; + if (!beentry) + return ntohl(inet_addr("127.0.0.1")); + memset(remote_host, 0x0, NI_MAXHOST); ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr, beentry->st_clientaddr.salen, @@ -1481,7 +1489,13 @@ pgss_store(uint64 queryid, return; Assert(query != NULL); - userid = GetUserId(); + if (kind == PGSS_ERROR) + { + int sec_ctx; + GetUserIdAndSecContext((Oid *)&userid, &sec_ctx); + } + else + userid = GetUserId(); application_name_len = pg_get_application_name(application_name); planid = plan_info ? plan_info->planid: 0; @@ -3303,6 +3317,10 @@ pgsm_emit_log_hook(ErrorData *edata) if (IsParallelWorker()) return; + /* Check if PostgreSQL has finished its own bootstraping code. 
*/ + if (MyProc == NULL) + return; + if ((edata->elevel == ERROR || edata->elevel == WARNING || edata->elevel == INFO || edata->elevel == DEBUG1)) { uint64 queryid = 0; From b3c7ba8c60f7e366ed40fb1a97425ac73e1d8243 Mon Sep 17 00:00:00 2001 From: Umair Shahid Date: Tue, 28 Dec 2021 17:58:36 +0400 Subject: [PATCH 20/39] Update README.md Proposed changes for https://jira.percona.com/browse/PG-155 --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 4c516d8..24d1ad5 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ * [Setup](#setup) * [Building from source code](#building-from-source) * [How to contribute](#how-to-contribute) +* [Report a Bug](#report-a-bug) * [Support, discussions and forums](#support-discussions-and-forums) * [License](#license) * [Copyright notice](#copyright-notice) @@ -241,6 +242,10 @@ We welcome and strongly encourage community participation and contributions, and The [Contributing Guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) contains the guidelines on how you can contribute. +### Report a Bug + +Please report all bugs to Percona's Jira: https://jira.percona.com/projects/PG/issues + ### Support, discussions and forums From c21a3de00def9c34b0738630e0ba4a83432d46fd Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 29 Nov 2021 11:09:47 -0300 Subject: [PATCH 21/39] PG-286: Avoid duplicate queries in text buffer. The memory area reserved for query text (pgsm_query_shared_buffer) was divided evenly for each bucket, this allowed to have the same query, e.g. "SELECT 1", duplicated in different buckets, thus wasting space. This commit fix the query text duplication by adding a new hash table whose only purpose is to verify if a given query is already added to the buffer (by using the queryID). This allows different buckets that share the same query to point to a unique entry in the query buffer (pgss_qbuf). 
When pg_stat_monitor moves to a new bucket id, by avoiding adding a query that already exists in the buffer it can also save some CPU time. --- hash_query.c | 87 +++++++------------------------------ pg_stat_monitor.c | 106 +++++++++++++++++++++++++++------------------- pg_stat_monitor.h | 41 +++++++----------- 3 files changed, 93 insertions(+), 141 deletions(-) diff --git a/hash_query.c b/hash_query.c index 29d0cc8..9ce3477 100644 --- a/hash_query.c +++ b/hash_query.c @@ -22,19 +22,8 @@ static pgssSharedState *pgss; static HTAB *pgss_hash; +static HTAB *pgss_query_hash; -static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size); -/* - * Copy query from src_buffer to dst_buff. - * Use query_id and query_pos to fast locate query in source buffer. - * Store updated query position in the destination buffer into param query_pos. - */ -static bool copy_query(uint64 bucket_id, - uint64 query_id, - uint64 query_pos, - unsigned char *dst_buf, - unsigned char *src_buf, - size_t *new_query_pos); static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size) @@ -50,7 +39,6 @@ void pgss_startup(void) { bool found = false; - int32 i; /* reset in case this is a restart within the postmaster */ @@ -75,16 +63,10 @@ pgss_startup(void) init_hook_stats(); #endif - pgss->query_buf_size_bucket = MAX_QUERY_BUF / PGSM_MAX_BUCKETS; - - for (i = 0; i < PGSM_MAX_BUCKETS; i++) - { - unsigned char *buf = (unsigned char *)ShmemAlloc(pgss->query_buf_size_bucket); - set_qbuf(i, buf); - memset(buf, 0, sizeof (uint64)); - } + set_qbuf((unsigned char *)ShmemAlloc(MAX_QUERY_BUF)); pgss_hash = hash_init("pg_stat_monitor: bucket hashtable", sizeof(pgssHashKey), sizeof(pgssEntry), MAX_BUCKET_ENTRIES); + pgss_query_hash = hash_init("pg_stat_monitor: queryID hashtable", sizeof(uint64), sizeof(pgssQueryEntry), MAX_BUCKET_ENTRIES); LWLockRelease(AddinShmemInitLock); @@ -107,6 +89,12 @@ pgsm_get_hash(void) return pgss_hash; } +HTAB* 
+pgsm_get_query_hash(void) +{ + return pgss_query_hash; +} + /* * shmem_shutdown hook: Dump statistics into file. * @@ -182,22 +170,15 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) * Caller must hold an exclusive lock on pgss->lock. */ void -hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]) +hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer) { HASH_SEQ_STATUS hash_seq; pgssEntry *entry = NULL; - pgssSharedState *pgss = pgsm_get_ss(); /* Store pending query ids from the previous bucket. */ List *pending_entries = NIL; ListCell *pending_entry; - if (new_bucket_id != -1) - { - /* Clear all queries in the query buffer for the new bucket. */ - memset(query_buffer[new_bucket_id], 0, pgss->query_buf_size_bucket); - } - /* Iterate over the hash table. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) @@ -210,6 +191,11 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu (entry->key.bucket_id == new_bucket_id && (entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR))) { + if (new_bucket_id == -1) { + /* pg_stat_monitor_reset(), remove entry from query hash table too. */ + hash_search(pgss_query_hash, &(entry->key.queryid), HASH_REMOVE, NULL); + } + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } @@ -268,13 +254,6 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu new_entry->counters = old_entry->counters; SpinLockInit(&new_entry->mutex); new_entry->encoding = old_entry->encoding; - /* copy query's text from previous bucket to the new one. 
*/ - copy_query(new_bucket_id, - new_entry->key.queryid, /* query id */ - old_entry->query_pos, /* query position in buffer */ - query_buffer[new_bucket_id], /* destination query buffer */ - query_buffer[old_bucket_id], /* source query buffer */ - &new_entry->query_pos); /* position in which query was inserted into destination buffer */ } free(old_entry); @@ -310,39 +289,3 @@ IsHashInitialize(void) return (pgss != NULL && pgss_hash != NULL); } - -static bool copy_query(uint64 bucket_id, - uint64 query_id, - uint64 query_pos, - unsigned char *dst_buf, - unsigned char *src_buf, - size_t *new_query_pos) -{ - uint64 query_len = 0; - uint64 buf_len = 0; - - memcpy(&buf_len, src_buf, sizeof (uint64)); - if (buf_len <= 0) - return false; - - /* Try to locate the query directly. */ - if (query_pos != 0 && (query_pos + sizeof(uint64) + sizeof(uint64)) < buf_len) - { - if (*(uint64 *)&src_buf[query_pos] != query_id) - return false; - - query_pos += sizeof(uint64); - - memcpy(&query_len, &src_buf[query_pos], sizeof(uint64)); /* query len */ - query_pos += sizeof(uint64); - - if (query_pos + query_len > buf_len) /* avoid reading past buffer's length. */ - return false; - - return SaveQueryText(bucket_id, query_id, dst_buf, - (const char *)&src_buf[query_pos], - query_len, new_query_pos); - } - - return false; -} diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index ec43578..78e4599 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -67,7 +67,8 @@ static int num_relations; /* Number of relation in the query */ static bool system_init = false; static struct rusage rusage_start; static struct rusage rusage_end; -static unsigned char *pgss_qbuf[MAX_BUCKETS]; +/* Query buffer, store queries' text. 
*/ +static unsigned char *pgss_qbuf = NULL; static char *pgss_explain(QueryDesc *queryDesc); #ifdef BENCHMARK static struct pg_hook_stats_t *pg_hook_stats; @@ -1311,7 +1312,6 @@ pgss_update_entry(pgssEntry *entry, e->counters.blocks.blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->blk_write_time); } e->counters.calls.usage += USAGE_EXEC(total_time); - e->counters.info.host = pg_get_client_addr(); if (sys_info) { e->counters.sysinfo.utime = sys_info->utime; @@ -1478,14 +1478,13 @@ pgss_store(uint64 queryid, uint64 planid; uint64 appid; char comments[512] = ""; - size_t query_len; /* Monitoring is disabled */ if (!PGSM_ENABLED) return; /* Safety check... */ - if (!IsSystemInitialized() || !pgss_qbuf[pg_atomic_read_u64(&pgss->current_wbucket)]) + if (!IsSystemInitialized()) return; Assert(query != NULL); @@ -1528,42 +1527,63 @@ pgss_store(uint64 queryid, entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); if (!entry) { - uint64 prev_qbuf_len; - /* position in which the query's text was inserted into the query buffer. */ - size_t qpos = 0; + pgssQueryEntry *query_entry; + size_t query_len = 0; + bool query_found = false; + uint64 prev_qbuf_len = 0; + HTAB *pgss_query_hash; - query_len = strlen(query); - if (query_len > PGSM_QUERY_MAX_LEN) - query_len = PGSM_QUERY_MAX_LEN; + pgss_query_hash = pgsm_get_query_hash(); + + query_entry = hash_search(pgss_query_hash, &queryid, HASH_ENTER_NULL, &query_found); + if (query_entry == NULL) + { + LWLockRelease(pgss->lock); + pgsm_log_error("pgss_store: out of memory (pgss_query_hash)."); + return; + } + else if (!query_found) + { + /* New query, must add it to the buffer, calculate its length. 
*/ + query_len = strlen(query); + if (query_len > PGSM_QUERY_MAX_LEN) + query_len = PGSM_QUERY_MAX_LEN; + } /* Need exclusive lock to make a new hashtable entry - promote */ LWLockRelease(pgss->lock); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - /* - * Save current query buffer length, if we fail to add a new - * new entry to the hash table then we must restore the - * original length. - */ - memcpy(&prev_qbuf_len, pgss_qbuf[bucketid], sizeof(prev_qbuf_len)); - if (!SaveQueryText(bucketid, queryid, pgss_qbuf[bucketid], query, query_len, &qpos)) + if (!query_found) { - LWLockRelease(pgss->lock); - elog(DEBUG1, "pg_stat_monitor: insufficient shared space for query."); - return; + if (!SaveQueryText(bucketid, queryid, pgss_qbuf, query, query_len, &query_entry->query_pos)) + { + LWLockRelease(pgss->lock); + pgsm_log_error("pgss_store: insufficient shared space for query."); + return; + } + /* + * Save current query buffer length, if we fail to add a new + * new entry to the hash table then we must restore the + * original length. + */ + memcpy(&prev_qbuf_len, pgss_qbuf, sizeof(prev_qbuf_len)); } /* OK to create a new hashtable entry */ entry = hash_entry_alloc(pgss, &key, GetDatabaseEncoding()); if (entry == NULL) { - /* Restore previous query buffer length. */ - memcpy(pgss_qbuf[bucketid], &prev_qbuf_len, sizeof(prev_qbuf_len)); + if (!query_found) + { + /* Restore previous query buffer length. */ + memcpy(pgss_qbuf, &prev_qbuf_len, sizeof(prev_qbuf_len)); + } LWLockRelease(pgss->lock); elog(DEBUG1, "pg_stat_monitor: out of memory"); return; } - entry->query_pos = qpos; + entry->query_pos = query_entry->query_pos; } if (jstate == NULL) @@ -1599,11 +1619,10 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS) errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); hash_entry_dealloc(-1, -1, NULL); - /* Reset query buffers. 
*/ - for (size_t i = 0; i < PGSM_MAX_BUCKETS; ++i) - { - *(uint64 *)pgss_qbuf[i] = 0; - } + + /* Reset query buffer. */ + *(uint64 *)pgss_qbuf = 0; + #ifdef BENCHMARK for (int i = STATS_START; i < STATS_END; ++i) { pg_hook_stats[i].min_time = 0; @@ -1713,7 +1732,6 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, uint64 userid = entry->key.userid; uint64 ip = entry->key.ip; uint64 planid = entry->key.planid; - unsigned char *buf = pgss_qbuf[bucketid]; #if PG_VERSION_NUM < 140000 bool toplevel = 1; bool is_allowed_role = is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS); @@ -1722,7 +1740,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, bool toplevel = entry->key.toplevel; #endif - if (read_query(buf, queryid, query_txt, entry->query_pos) == 0) + if (read_query(pgss_qbuf, queryid, query_txt, entry->query_pos) == 0) { int rc; rc = read_query_buffer(bucketid, queryid, query_txt, entry->query_pos); @@ -1749,8 +1767,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (tmp.info.parentid != UINT64CONST(0)) { - int rc = 0; - if (read_query(buf, tmp.info.parentid, parent_query_txt, 0) == 0) + if (read_query(pgss_qbuf, tmp.info.parentid, parent_query_txt, 0) == 0) { rc = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt, 0); if (rc != 1) @@ -1802,7 +1819,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (enc != query_txt) pfree(enc); /* plan at column number 7 */ - if (planid && strlen(tmp.planinfo.plan_text) > 0) + if (planid && tmp.planinfo.plan_text[0]) values[i++] = CStringGetTextDatum(tmp.planinfo.plan_text); else nulls[i++] = true; @@ -3120,17 +3137,17 @@ SaveQueryText(uint64 bucketid, /* * If the query buffer is empty, there is nothing to dump, this also - * means that the current query length exceeds MAX_QUERY_BUFFER_BUCKET. + * means that the current query length exceeds MAX_QUERY_BUF. 
*/ if (buf_len <= sizeof (uint64)) return false; - dump_ok = dump_queries_buffer(bucketid, buf, MAX_QUERY_BUFFER_BUCKET); + dump_ok = dump_queries_buffer(bucketid, buf, MAX_QUERY_BUF); buf_len = sizeof (uint64); /* * We must check for overflow again, as the query length may - * exceed the size allocated to the buffer (MAX_QUERY_BUFFER_BUCKET). + * exceed the total size allocated to the buffer (MAX_QUERY_BUF). */ if (QUERY_BUFFER_OVERFLOW(buf_len, query_len)) { @@ -3293,9 +3310,10 @@ pg_stat_monitor_hook_stats(PG_FUNCTION_ARGS) } void -set_qbuf(int i, unsigned char *buf) +set_qbuf(unsigned char *buf) { - pgss_qbuf[i] = buf; + pgss_qbuf = buf; + *(uint64 *)pgss_qbuf = 0; } #ifdef BENCHMARK @@ -3413,13 +3431,13 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos) if (fd < 0) goto exit; - buf = (unsigned char*) palloc(MAX_QUERY_BUFFER_BUCKET); + buf = (unsigned char*) palloc(MAX_QUERY_BUF); while (!done) { off = 0; - /* read a chunck of MAX_QUERY_BUFFER_BUCKET size. */ + /* read a chunck of MAX_QUERY_BUF size. */ do { - nread = read(fd, buf + off, MAX_QUERY_BUFFER_BUCKET - off); + nread = read(fd, buf + off, MAX_QUERY_BUF - off); if (nread == -1) { if (errno == EINTR && tries++ < 3) /* read() was interrupted, attempt to read again (max attempts=3) */ @@ -3434,9 +3452,9 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos) } off += nread; - } while (off < MAX_QUERY_BUFFER_BUCKET); + } while (off < MAX_QUERY_BUF); - if (off == MAX_QUERY_BUFFER_BUCKET) + if (off == MAX_QUERY_BUF) { /* we have a chunck, scan it looking for queryid. */ if (read_query(buf, queryid, query_txt, pos) != 0) @@ -3449,7 +3467,7 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos) } else /* - * Either done=true or file has a size not multiple of MAX_QUERY_BUFFER_BUCKET. + * Either done=true or file has a size not multiple of MAX_QUERY_BUF. * It is safe to assume that the file was truncated or corrupted. 
*/ break; diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 84ed936..045da45 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -87,9 +87,8 @@ #define MAX_QUERY_BUF (PGSM_QUERY_SHARED_BUFFER * 1024 * 1024) #define MAX_BUCKETS_MEM (PGSM_MAX * 1024 * 1024) #define BUCKETS_MEM_OVERFLOW() ((hash_get_num_entries(pgss_hash) * sizeof(pgssEntry)) >= MAX_BUCKETS_MEM) -#define MAX_QUERY_BUFFER_BUCKET MAX_QUERY_BUF / PGSM_MAX_BUCKETS #define MAX_BUCKET_ENTRIES (MAX_BUCKETS_MEM / sizeof(pgssEntry)) -#define QUERY_BUFFER_OVERFLOW(x,y) ((x + y + sizeof(uint64) + sizeof(uint64)) > MAX_QUERY_BUFFER_BUCKET) +#define QUERY_BUFFER_OVERFLOW(x,y) ((x + y + sizeof(uint64) + sizeof(uint64)) > MAX_QUERY_BUF) #define QUERY_MARGIN 100 #define MIN_QUERY_LEN 10 #define SQLCODE_LEN 20 @@ -161,7 +160,7 @@ typedef enum AGG_KEY #define MAX_QUERY_LEN 1024 -/* shared nenory storage for the query */ +/* shared memory storage for the query */ typedef struct CallTime { double total_time; /* total execution time, in msec */ @@ -171,21 +170,19 @@ typedef struct CallTime double sum_var_time; /* sum of variances in execution time in msec */ } CallTime; -typedef struct pgssQueryHashKey -{ - uint64 bucket_id; /* bucket number */ - uint64 queryid; /* query identifier */ - uint64 userid; /* user OID */ - uint64 dbid; /* database OID */ - uint64 ip; /* client ip address */ - uint64 appid; /* hash of application name */ -} pgssQueryHashKey; - +/* + * Entry type for queries hash table (query ID). + * + * We use a hash table to keep track of query IDs that have their + * corresponding query text added to the query buffer (pgsm_query_shared_buffer). + * + * This allow us to avoid adding duplicated queries to the buffer, therefore + * leaving more space for other queries and saving some CPU. + */ typedef struct pgssQueryEntry { - pgssQueryHashKey key; /* hash key of entry - MUST BE FIRST */ - uint64 pos; /* bucket number */ - uint64 state; + uint64 queryid; /* query identifier, also the key. 
*/ + size_t query_pos; /* query location within query buffer */ } pgssQueryEntry; typedef struct PlanInfo @@ -208,10 +205,6 @@ typedef struct pgssHashKey typedef struct QueryInfo { - uint64 queryid; /* query identifier */ - Oid userid; /* user OID */ - Oid dbid; /* database OID */ - uint host; /* client IP */ uint64 parentid; /* parent queryid of current query*/ int64 type; /* type of query, options are query, info, warning, error, fatal */ char application_name[APPLICATIONNAME_LEN]; @@ -311,7 +304,6 @@ typedef struct pgssSharedState pg_atomic_uint64 current_wbucket; pg_atomic_uint64 prev_bucket_usec; uint64 bucket_entry[MAX_BUCKETS]; - int64 query_buf_size_bucket; char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ } pgssSharedState; @@ -382,21 +374,20 @@ int pgsm_get_bucket_size(void); pgssSharedState* pgsm_get_ss(void); HTAB *pgsm_get_plan_hash(void); HTAB *pgsm_get_hash(void); +HTAB *pgsm_get_query_hash(void); HTAB *pgsm_get_plan_hash(void); void hash_entry_reset(void); void hash_query_entryies_reset(void); void hash_query_entries(); void hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); -void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); +void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer); pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding); Size hash_memsize(void); int read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos); uint64 read_query(unsigned char *buf, uint64 queryid, char * query, size_t pos); -pgssQueryEntry* hash_find_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid); -pgssQueryEntry* hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid); void pgss_startup(void); -void set_qbuf(int i, unsigned char *); +void set_qbuf(unsigned char *); /* 
hash_query.c */ void pgss_startup(void); From df89c3f4a36d9ad5b21c6e7dc4a0e67da4f4f7da Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 29 Nov 2021 14:39:56 -0300 Subject: [PATCH 22/39] PG-286: Small performance improvements. pgss_ExecutorEnd: Avoid unnecessary memset(plan_info, 0, ...). We only use this object if the condition below is true, in which case we already initialize all the fields in the object, also we now store the plan string length (plan_info.plan_len) to avoid calling strlen on it again later: if (queryDesc->operation == CMD_SELECT && PGSM_QUERY_PLAN) { ... here we initialize plan_info If the condition is false, then we pass a NULL PlanInfo* to the pgss_store to avoid more unnecessary processing. pgss_planner_hook: Similar, avoid memset(plan_info, 0, ...) this object is not used here, so we pass NULL to pgss_store. pg_get_application_name: Remove call to strlen, snprintf already gives us the calculated string length, so we just return it. pg_get_client_addr: Cache localhost, avoid calling ntohl(inet_addr("127.0.0.1")) all the time. pgss_update_entry: Make use of PlanInfo->plan_len, avoiding a call to strlen again. intarray_get_datum: Init the string by setting the first byte to '\0'. 
--- pg_stat_monitor.c | 57 ++++++++++++++++++++++------------------------- pg_stat_monitor.h | 1 + 2 files changed, 28 insertions(+), 30 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 78e4599..3e318e6 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -656,14 +656,15 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) uint64 queryId = queryDesc->plannedstmt->queryId; SysInfo sys_info; PlanInfo plan_info; + PlanInfo *plan_ptr = NULL; - /* Extract the plan information in case of SELECT statment */ - memset(&plan_info, 0, sizeof(PlanInfo)); + /* Extract the plan information in case of SELECT statement */ if (queryDesc->operation == CMD_SELECT && PGSM_QUERY_PLAN) - { + { MemoryContext mct = MemoryContextSwitchTo(TopMemoryContext); - snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc)); - plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char*)plan_info.plan_text, strlen(plan_info.plan_text), 0)); + plan_info.plan_len = snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc)); + plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char*)plan_info.plan_text, plan_info.plan_len, 0)); + plan_ptr = &plan_info; MemoryContextSwitchTo(mct); } @@ -682,7 +683,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) pgss_store(queryId, /* query id */ queryDesc->sourceText, /* query text */ - &plan_info, /* PlanInfo */ + plan_ptr, /* PlanInfo */ queryDesc->operation, /* CmdType */ &sys_info, /* SysInfo */ NULL, /* ErrorInfo */ @@ -784,7 +785,6 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par if (PGSM_TRACK_PLANNING && query_string && parse->queryId != UINT64CONST(0) && !IsParallelWorker()) { - PlanInfo plan_info; instr_time start; instr_time duration; BufferUsage bufusage_start; @@ -792,7 +792,6 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par WalUsage walusage_start; WalUsage walusage; - memset(&plan_info, 0, sizeof(PlanInfo)); /* 
We need to track buffer usage as the planner can access them. */ bufusage_start = pgBufferUsage; @@ -836,7 +835,7 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start); pgss_store(parse->queryId, /* query id */ query_string, /* query */ - &plan_info, /* PlanInfo */ + NULL, /* PlanInfo */ parse->commandType, /* CmdType */ NULL, /* SysInfo */ NULL, /* ErrorInfo */ @@ -1142,41 +1141,41 @@ pg_get_application_name(char *application_name) if (!beentry) return snprintf(application_name, APPLICATIONNAME_LEN, "%s", "postmaster"); - snprintf(application_name, APPLICATIONNAME_LEN, "%s", beentry->st_appname); - return strlen(application_name); + return snprintf(application_name, APPLICATIONNAME_LEN, "%s", beentry->st_appname); } -/* - * Store some statistics for a statement. - * - * If queryId is 0 then this is a utility statement and we should compute - * a suitable queryId internally. - * - * If jstate is not NULL then we're trying to create an entry for which - * we have no statistics as yet; we just want to record the normalized - */ - static uint pg_get_client_addr(void) { PgBackendStatus *beentry = pg_get_backend_status(); char remote_host[NI_MAXHOST]; int ret; + static uint localhost = 0; + + remote_host[0] = '\0'; if (!beentry) return ntohl(inet_addr("127.0.0.1")); - - memset(remote_host, 0x0, NI_MAXHOST); + ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr, beentry->st_clientaddr.salen, remote_host, sizeof(remote_host), NULL, 0, NI_NUMERICHOST | NI_NUMERICSERV); if (ret != 0) - return ntohl(inet_addr("127.0.0.1")); + { + if (localhost == 0) + localhost = ntohl(inet_addr("127.0.0.1")); + return localhost; + } if (strcmp(remote_host, "[local]") == 0) - return ntohl(inet_addr("127.0.0.1")); + { + if (localhost == 0) + localhost = ntohl(inet_addr("127.0.0.1")); + return localhost; + } + return ntohl(inet_addr(remote_host)); } @@ -1204,7 +1203,7 @@ pgss_update_entry(pgssEntry 
*entry, int message_len = error_info ? strlen (error_info->message) : 0; int comments_len = comments ? strlen (comments) : 0; int sqlcode_len = error_info ? strlen (error_info->sqlcode) : 0; - int plan_text_len = plan_info ? strlen (plan_info->plan_text) : 0; + int plan_text_len = plan_info ? plan_info->plan_len : 0; /* volatile block */ @@ -3018,18 +3017,16 @@ intarray_get_datum(int32 arr[], int len) int j; char str[1024]; char tmp[10]; - bool first = true; - memset(str, 0, sizeof(str)); + str[0] = '\0'; /* Need to calculate the actual size, and avoid unnessary memory usage */ for (j = 0; j < len; j++) { - if (first) + if (!str[0]) { snprintf(tmp, 10, "%d", arr[j]); strcat(str,tmp); - first = false; continue; } snprintf(tmp, 10, ",%d", arr[j]); diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 045da45..f7a5848 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -189,6 +189,7 @@ typedef struct PlanInfo { uint64 planid; /* plan identifier */ char plan_text[PLAN_TEXT_LEN]; /* plan text */ + size_t plan_len; /* strlen(plan_text) */ } PlanInfo; typedef struct pgssHashKey From 8c61e24f95c70a72834eacc34d82086544f7eb8e Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 29 Nov 2021 15:25:56 -0300 Subject: [PATCH 23/39] PG-286: Fix query buffer overflow management. If pgsm_overflow_target is ON (default, 1) and the query buffer overflows, we now dump the buffer and keep track of how many times pg_stat_monitor changed bucket since that. If an overflow happens again before pg_stat_monitor cycles through pgsm_max_buckets buckets (default 10), then we don't dump the buffer again, but instead report an error, this ensures that only one dump file of size pgsm_query_shared_buffer will be on disk at any time, avoiding slowing down queries to the pg_stat_monitor view. As soon as pg_stat_monitor cycles through all buckets, we remove the dump file and reset the counter (pgss->n_bucket_cycles). 
--- pg_stat_monitor.c | 46 +++++++++++++++++++++++++++++++++++----------- pg_stat_monitor.h | 15 +++++++++++++++ 2 files changed, 50 insertions(+), 11 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 3e318e6..f6f8bde 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -215,7 +215,8 @@ static uint64 djb2_hash(unsigned char *str, size_t len); void _PG_init(void) { - int i, rc; + int rc; + char file_name[1024]; elog(DEBUG2, "pg_stat_monitor: %s()", __FUNCTION__); /* @@ -240,12 +241,8 @@ _PG_init(void) EnableQueryId(); #endif - for (i = 0; i < PGSM_MAX_BUCKETS; i++) - { - char file_name[1024]; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, i); - unlink(file_name); - } + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); + unlink(file_name); EmitWarningsOnPlaceholders("pg_stat_monitor"); @@ -2085,8 +2082,22 @@ get_next_wbucket(pgssSharedState *pgss) LWLockAcquire(pgss->lock, LW_EXCLUSIVE); hash_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf); - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)new_bucket_id); - unlink(file_name); + if (pgss->overflow) + { + pgss->n_bucket_cycles += 1; + if (pgss->n_bucket_cycles >= PGSM_MAX_BUCKETS) + { + /* + * A full rotation of PGSM_MAX_BUCKETS buckets happened since + * we detected a query buffer overflow. + * Reset overflow state and remove the dump file. 
+ */ + pgss->overflow = false; + pgss->n_bucket_cycles = 0; + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); + unlink(file_name); + } + } LWLockRelease(pgss->lock); @@ -3131,6 +3142,13 @@ SaveQueryText(uint64 bucketid, case OVERFLOW_TARGET_DISK: { bool dump_ok; + pgssSharedState *pgss = pgsm_get_ss(); + + if (pgss->overflow) + { + pgsm_log_error("query buffer overflowed twice"); + return false; + } /* * If the query buffer is empty, there is nothing to dump, this also @@ -3142,6 +3160,12 @@ SaveQueryText(uint64 bucketid, dump_ok = dump_queries_buffer(bucketid, buf, MAX_QUERY_BUF); buf_len = sizeof (uint64); + if (dump_ok) + { + pgss->overflow = true; + pgss->n_bucket_cycles = 0; + } + /* * We must check for overflow again, as the query length may * exceed the total size allocated to the buffer (MAX_QUERY_BUF). @@ -3367,7 +3391,7 @@ dump_queries_buffer(int bucket_id, unsigned char *buf, int buf_len) int off = 0; int tries = 0; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, bucket_id); + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); fd = OpenTransientFile(file_name, O_RDWR | O_CREAT | O_APPEND | PG_BINARY); if (fd < 0) { @@ -3423,7 +3447,7 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt, size_t pos) bool done = false; bool found = false; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, bucket_id); + snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE); fd = OpenTransientFile(file_name, O_RDONLY | PG_BINARY); if (fd < 0) goto exit; diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index f7a5848..0d96fe7 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -306,6 +306,21 @@ typedef struct pgssSharedState pg_atomic_uint64 prev_bucket_usec; uint64 bucket_entry[MAX_BUCKETS]; char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ + LWLock *errors_lock; /* protects errors hashtable search/modification */ + /* + * These variables are used when pgsm_overflow_target is ON. 
+ * + * overflow is set to true when the query buffer overflows. + * + * n_bucket_cycles counts the number of times we changed bucket + * since the query buffer overflowed. When it reaches pgsm_max_buckets + * we remove the dump file, also reset the counter. + * + * This allows us to avoid having a large file on disk that would also + * slowdown queries to the pg_stat_monitor view. + */ + bool overflow; + size_t n_bucket_cycles; } pgssSharedState; #define ResetSharedState(x) \ From b798ffd4615e34ebb7d1a347eb1fc805608996b5 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Fri, 3 Dec 2021 14:14:31 -0300 Subject: [PATCH 24/39] PG-286: Reduce calls to pgstat_fetch_stat_numbackends(). After a couple of CPU profiling sessions with perf, it was detected that the function pgstat_fetch_stat_numbackends() is very expensive, reading the implementation on PostgreSQL's backend_status.c just confirmed that. We use that function on pg_stat_monitor to retrieve the application name and IP address of the client, we now cache the results in order to avoid calling it for every query being processed. 
--- pg_stat_monitor.c | 75 ++++++++++++++++++++++++++--------------------- 1 file changed, 41 insertions(+), 34 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index f6f8bde..e656aa1 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -104,12 +104,11 @@ PG_FUNCTION_INFO_V1(pg_stat_monitor_settings); PG_FUNCTION_INFO_V1(get_histogram_timings); PG_FUNCTION_INFO_V1(pg_stat_monitor_hook_stats); -static uint pg_get_client_addr(void); -static int pg_get_application_name(char* application_name); +static uint pg_get_client_addr(bool *ok); +static int pg_get_application_name(char *application_name, bool *ok); static PgBackendStatus *pg_get_backend_status(void); static Datum intarray_get_datum(int32 arr[], int len); - #if PG_VERSION_NUM < 140000 DECLARE_HOOK(void pgss_post_parse_analyze, ParseState *pstate, Query *query); #else @@ -1112,8 +1111,8 @@ static PgBackendStatus* pg_get_backend_status(void) { LocalPgBackendStatus *local_beentry; - int num_backends = pgstat_fetch_stat_numbackends(); - int i; + int num_backends = pgstat_fetch_stat_numbackends(); + int i; for (i = 1; i <= num_backends; i++) { @@ -1132,46 +1131,44 @@ pg_get_backend_status(void) } static int -pg_get_application_name(char *application_name) +pg_get_application_name(char *application_name, bool *ok) { PgBackendStatus *beentry = pg_get_backend_status(); if (!beentry) return snprintf(application_name, APPLICATIONNAME_LEN, "%s", "postmaster"); - return snprintf(application_name, APPLICATIONNAME_LEN, "%s", beentry->st_appname); + if (!beentry) + return snprintf(application_name, APPLICATIONNAME_LEN, "%s", "unknown"); + + *ok = true; + + return snprintf(application_name, APPLICATIONNAME_LEN, "%s", beentry->st_appname); } static uint -pg_get_client_addr(void) +pg_get_client_addr(bool *ok) { PgBackendStatus *beentry = pg_get_backend_status(); - char remote_host[NI_MAXHOST]; - int ret; - static uint localhost = 0; + char remote_host[NI_MAXHOST]; + int ret; remote_host[0] = '\0'; if 
(!beentry) return ntohl(inet_addr("127.0.0.1")); - + + *ok = true; + ret = pg_getnameinfo_all(&beentry->st_clientaddr.addr, beentry->st_clientaddr.salen, remote_host, sizeof(remote_host), NULL, 0, NI_NUMERICHOST | NI_NUMERICSERV); if (ret != 0) - { - if (localhost == 0) - localhost = ntohl(inet_addr("127.0.0.1")); - return localhost; - } + return ntohl(inet_addr("127.0.0.1")); if (strcmp(remote_host, "[local]") == 0) - { - if (localhost == 0) - localhost = ntohl(inet_addr("127.0.0.1")); - return localhost; - } + return ntohl(inet_addr("127.0.0.1")); return ntohl(inet_addr(remote_host)); } @@ -1191,11 +1188,11 @@ pgss_update_entry(pgssEntry *entry, BufferUsage *bufusage, WalUsage *walusage, bool reset, - pgssStoreKind kind) + pgssStoreKind kind, + const char *app_name, + size_t app_name_len) { int index; - char application_name[APPLICATIONNAME_LEN]; - int application_name_len = pg_get_application_name(application_name); double old_mean; int message_len = error_info ? strlen (error_info->message) : 0; int comments_len = comments ? 
strlen (comments) : 0; @@ -1264,11 +1261,11 @@ pgss_update_entry(pgssEntry *entry, e->counters.resp_calls[index]++; } - if (plan_text_len > 0) + if (plan_text_len > 0 && !e->counters.planinfo.plan_text[0]) _snprintf(e->counters.planinfo.plan_text, plan_info->plan_text, plan_text_len + 1, PLAN_TEXT_LEN); - if (application_name_len > 0) - _snprintf(e->counters.info.application_name, application_name, application_name_len + 1, APPLICATIONNAME_LEN); + if (app_name_len > 0 && !e->counters.info.application_name[0]) + _snprintf(e->counters.info.application_name, app_name, app_name_len + 1, APPLICATIONNAME_LEN); e->counters.info.num_relations = num_relations; _snprintf2(e->counters.info.relations, relations, num_relations, REL_LEN); @@ -1465,8 +1462,8 @@ pgss_store(uint64 queryid, pgssHashKey key; pgssEntry *entry; pgssSharedState *pgss = pgsm_get_ss(); - char application_name[APPLICATIONNAME_LEN]; - int application_name_len; + static char application_name[APPLICATIONNAME_LEN] = ""; + static int application_name_len = 0; bool reset = false; uint64 bucketid; uint64 prev_bucket_id; @@ -1474,6 +1471,9 @@ pgss_store(uint64 queryid, uint64 planid; uint64 appid; char comments[512] = ""; + static bool found_app_name = false; + static bool found_client_addr = false; + static uint client_addr = 0; /* Monitoring is disabled */ if (!PGSM_ENABLED) @@ -1492,8 +1492,13 @@ pgss_store(uint64 queryid, else userid = GetUserId(); - application_name_len = pg_get_application_name(application_name); - planid = plan_info ? plan_info->planid: 0; + if (!found_app_name) + application_name_len = pg_get_application_name(application_name, &found_app_name); + + if (!found_client_addr) + client_addr = pg_get_client_addr(&found_client_addr); + + planid = plan_info ? 
plan_info->planid : 0; appid = djb2_hash((unsigned char *)application_name, application_name_len); extract_query_comments(query, comments, sizeof(comments)); @@ -1508,7 +1513,7 @@ pgss_store(uint64 queryid, key.userid = userid; key.dbid = MyDatabaseId; key.queryid = queryid; - key.ip = pg_get_client_addr(); + key.ip = client_addr; key.planid = planid; key.appid = appid; #if PG_VERSION_NUM < 140000 @@ -1597,7 +1602,9 @@ pgss_store(uint64 queryid, bufusage, /* bufusage */ walusage, /* walusage */ reset, /* reset */ - kind); /* kind */ + kind, /* kind */ + application_name, + application_name_len); LWLockRelease(pgss->lock); } From 258fa7faa4de675544fc201af81677b78d40008b Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Sat, 4 Dec 2021 11:01:39 -0300 Subject: [PATCH 25/39] PG-286: Check for NULL return on hash_search before using object. Check if hash_search() function returns NULL before attempting to use the object in hash_entry_alloc(). --- hash_query.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hash_query.c b/hash_query.c index 9ce3477..4006544 100644 --- a/hash_query.c +++ b/hash_query.c @@ -140,7 +140,9 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) } /* Find or create an entry with desired hash code */ entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER_NULL, &found); - if (!found) + if (entry == NULL) + pgsm_log_error("hash_entry_alloc: OUT OF MEMORY"); + else if (!found) { pgss->bucket_entry[pg_atomic_read_u64(&pgss->current_wbucket)]++; /* New entry, initialize it */ @@ -152,8 +154,7 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) /* ... 
and don't forget the query text metadata */ entry->encoding = encoding; } - if (entry == NULL) - elog(DEBUG1, "%s", "pg_stat_monitor: out of memory"); + return entry; } From 4ed0b7cf3eb808cfa009867e00d8b8ea36ba5fa2 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Sat, 4 Dec 2021 11:05:24 -0300 Subject: [PATCH 26/39] PG-286: Several improvements. This commit introduces serveral improvements: 1. Removal of pgss_store_query and pgss_store_utility functions: To store a query, we just use pgss_store(), this makes the code more uniform. 2. Always pass the query length to the pgss_store function using parse state from PostgreSQL to avoid calculating query length again. 3. Always clean the query (extra spaces, update query location) in pgss_store. 4. Normalize queries right before adding them to the query buffer, but only if user asked for query normalization. 5. Correctly handle utility queries among different PostgreSQL versions: - A word about how utility functions are handled on PG 13 and later versions: - On PostgreSQL <= 13, we have to compute a query ID, on later versions we can call EnableQueryId() to inform Postmaster we want to enable query ID computation. - On PostgreSQL <= 13, post_parse hook is called after process utility hook, on PostgreSQL >= 14, post_parse hook is called before process utility functions. - Based on that information, on PostgreSQL <= 13 / process utility, we pass 0 as queryid to the pgss_store function, then we calculate a queryid after cleaning the query (CleanQueryText) using pgss_hash_string. - On PostgreSQL 14 onward, post_parse() is called before pgss_ProcessUtility, we Clear queryId for prepared statements related utility, on process utility hook, we save the query ID for passing it to the pgss_store function, but mark the query ID with zero to avoid instrumenting it again on executor hooks. 
--- pg_stat_monitor.c | 552 +++++++++++++++++++++++++--------------------- 1 file changed, 300 insertions(+), 252 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index e656aa1..42286a4 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -136,6 +136,7 @@ DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryStri ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *qc); +static uint64 pgss_hash_string(const char *str, int len); #else static void BufferUsageAccumDiff(BufferUsage* bufusage, BufferUsage* pgBufferUsage, BufferUsage* bufusage_start); DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, @@ -144,30 +145,28 @@ DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryStri DestReceiver *dest, char *completionTag); #endif - -static uint64 pgss_hash_string(const char *str, int len); char *unpack_sql_state(int sql_state); -static void pgss_store_error(uint64 queryid, const char * query, ErrorData *edata); +#define PGSM_HANDLED_UTILITY(n) (!IsA(n, ExecuteStmt) && \ + !IsA(n, PrepareStmt) && \ + !IsA(n, DeallocateStmt)) -static void pgss_store_utility(const char *query, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage); +static void pgss_store_error(uint64 queryid, const char *query, ErrorData *edata); static void pgss_store(uint64 queryid, - const char *query, - PlanInfo *plan_info, - CmdType cmd_type, - SysInfo *sys_info, - ErrorInfo *error_info, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage, - JumbleState *jstate, - pgssStoreKind kind); + const char *query, + int query_location, + int query_len, + PlanInfo *plan_info, + CmdType cmd_type, + SysInfo *sys_info, + ErrorInfo *error_info, + double total_time, + uint64 rows, + BufferUsage *bufusage, + WalUsage *walusage, + JumbleState *jstate, + pgssStoreKind kind); static void pg_stat_monitor_internal(FunctionCallInfo 
fcinfo, bool showtext); @@ -179,6 +178,12 @@ static void JumbleQuery(JumbleState *jstate, Query *query); static void JumbleRangeTable(JumbleState *jstate, List *rtable); static void JumbleExpr(JumbleState *jstate, Node *node); static void RecordConstLocation(JumbleState *jstate, int location); +/* + * Given a possibly multi-statement source string, confine our attention to the + * relevant part of the string. + */ +static const char * +CleanQuerytext(const char *query, int *location, int *len); #endif static char *generate_normalized_query(JumbleState *jstate, const char *query, @@ -188,19 +193,6 @@ static int comp_location(const void *a, const void *b); static uint64 get_next_wbucket(pgssSharedState *pgss); -static void -pgss_store_query(uint64 queryid, - const char * query, - CmdType cmd_type, - int query_location, - int query_len, -#if PG_VERSION_NUM > 130000 - JumbleState *jstate, -#else - JumbleState *jstate, -#endif - pgssStoreKind kind); - #if PG_VERSION_NUM < 140000 static uint64 get_query_id(JumbleState *jstate, Query *query); #endif @@ -357,8 +349,6 @@ pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query, JumbleState static void pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) { - pgssStoreKind kind = PGSS_PARSE; - if (prev_post_parse_analyze_hook) prev_post_parse_analyze_hook(pstate, query, jstate); @@ -376,7 +366,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) */ if (query->utilityStmt) { - query->queryId = UINT64CONST(0); + if (PGSM_TRACK_UTILITY && !PGSM_HANDLED_UTILITY(query->utilityStmt)) + query->queryId = UINT64CONST(0); return; } @@ -387,15 +378,21 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) * constants, the normalized string would be the same as the query text * anyway, so there's no need for an early entry. 
*/ - if (jstate == NULL || jstate->clocations_count <= 0) - return; - pgss_store_query(query->queryId, /* queryid */ - pstate->p_sourcetext, /* query */ - query->commandType, /* CmdType */ - query->stmt_location, /* Query Location */ - query->stmt_len, /* Query Len */ - jstate, /* JumbleState */ - kind); /*pgssStoreKind */ + if (jstate && jstate->clocations_count > 0) + pgss_store(query->queryId, /* query id */ + pstate->p_sourcetext, /* query */ + query->stmt_location, /* query location */ + query->stmt_len, /* query length */ + NULL, /* PlanInfo */ + query->commandType, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + 0, /* totaltime */ + 0, /* rows */ + NULL, /* bufusage */ + NULL, /* walusage */ + jstate, /* JumbleState */ + PGSS_PARSE); /* pgssStoreKind */ } #else @@ -416,7 +413,6 @@ static void pgss_post_parse_analyze(ParseState *pstate, Query *query) { JumbleState jstate; - pgssStoreKind kind = PGSS_PARSE; if (prev_post_parse_analyze_hook) prev_post_parse_analyze_hook(pstate, query); @@ -451,16 +447,21 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) if (query->queryId == UINT64CONST(0)) query->queryId = UINT64CONST(1); - if (jstate.clocations_count <= 0) - return; - - pgss_store_query(query->queryId, /* queryid */ - pstate->p_sourcetext, /* query */ - query->commandType, /* CmdType */ - query->stmt_location, /* Query Location */ - query->stmt_len, /* Query Len */ - &jstate, /* JumbleState */ - kind); /*pgssStoreKind */ + if (jstate.clocations_count > 0) + pgss_store(query->queryId, /* query id */ + pstate->p_sourcetext, /* query */ + query->stmt_location, /* query location */ + query->stmt_len, /* query length */ + NULL, /* PlanInfo */ + query->commandType, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + 0, /* totaltime */ + 0, /* rows */ + NULL, /* bufusage */ + NULL, /* walusage */ + &jstate, /* JumbleState */ + PGSS_PARSE); /* pgssStoreKind */ } #endif @@ -480,10 +481,8 @@ pgss_ExecutorStart_benchmark(QueryDesc 
*queryDesc, int eflags) static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) { - uint64 queryId = queryDesc->plannedstmt->queryId; - - if(getrusage(RUSAGE_SELF, &rusage_start) != 0) - elog(DEBUG1, "pg_stat_monitor: failed to execute getrusage"); + if (getrusage(RUSAGE_SELF, &rusage_start) != 0) + pgsm_log_error("pgss_ExecutorStart: failed to execute getrusage"); if (prev_ExecutorStart) prev_ExecutorStart(queryDesc, eflags); @@ -517,22 +516,20 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) #endif MemoryContextSwitchTo(oldcxt); } - pgss_store(queryId, /* query id */ + pgss_store(queryDesc->plannedstmt->queryId, /* query id */ queryDesc->sourceText, /* query text */ + queryDesc->plannedstmt->stmt_location, /* query location */ + queryDesc->plannedstmt->stmt_len, /* query length */ NULL, /* PlanInfo */ queryDesc->operation, /* CmdType */ NULL, /* SysInfo */ NULL, /* ErrorInfo */ 0, /* totaltime */ 0, /* rows */ - NULL, /* bufusage */ -#if PG_VERSION_NUM >= 130000 + NULL, /* bufusage */ NULL, /* walusage */ -#else - NULL, -#endif - NULL, - PGSS_EXEC); /* pgssStoreKind */ + NULL, /* JumbleState */ + PGSS_EXEC); /* pgssStoreKind */ } } @@ -649,17 +646,17 @@ pgss_ExecutorEnd_benchmark(QueryDesc *queryDesc) static void pgss_ExecutorEnd(QueryDesc *queryDesc) { - uint64 queryId = queryDesc->plannedstmt->queryId; - SysInfo sys_info; - PlanInfo plan_info; - PlanInfo *plan_ptr = NULL; + uint64 queryId = queryDesc->plannedstmt->queryId; + SysInfo sys_info; + PlanInfo plan_info; + PlanInfo *plan_ptr = NULL; - /* Extract the plan information in case of SELECT statement */ + /* Extract the plan information in case of SELECT statement */ if (queryDesc->operation == CMD_SELECT && PGSM_QUERY_PLAN) - { + { MemoryContext mct = MemoryContextSwitchTo(TopMemoryContext); plan_info.plan_len = snprintf(plan_info.plan_text, PLAN_TEXT_LEN, "%s", pgss_explain(queryDesc)); - plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char*)plan_info.plan_text, 
plan_info.plan_len, 0)); + plan_info.planid = DatumGetUInt64(hash_any_extended((const unsigned char *)plan_info.plan_text, plan_info.plan_len, 0)); plan_ptr = &plan_info; MemoryContextSwitchTo(mct); } @@ -677,15 +674,17 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) sys_info.utime = time_diff(rusage_end.ru_utime, rusage_start.ru_utime); sys_info.stime = time_diff(rusage_end.ru_stime, rusage_start.ru_stime); - pgss_store(queryId, /* query id */ - queryDesc->sourceText, /* query text */ - plan_ptr, /* PlanInfo */ - queryDesc->operation, /* CmdType */ - &sys_info, /* SysInfo */ - NULL, /* ErrorInfo */ - queryDesc->totaltime->total * 1000.0, /* totaltime */ - queryDesc->estate->es_processed, /* rows */ - &queryDesc->totaltime->bufusage, /* bufusage */ + pgss_store(queryId, /* query id */ + queryDesc->sourceText, /* query text */ + queryDesc->plannedstmt->stmt_location, /* query location */ + queryDesc->plannedstmt->stmt_len, /* query length */ + plan_ptr, /* PlanInfo */ + queryDesc->operation, /* CmdType */ + &sys_info, /* SysInfo */ + NULL, /* ErrorInfo */ + queryDesc->totaltime->total * 1000.0, /* totaltime */ + queryDesc->estate->es_processed, /* rows */ + &queryDesc->totaltime->bufusage, /* bufusage */ #if PG_VERSION_NUM >= 130000 &queryDesc->totaltime->walusage, /* walusage */ #else @@ -829,18 +828,20 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par /* calc differences of WAL counters. 
*/ memset(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start); - pgss_store(parse->queryId, /* query id */ - query_string, /* query */ - NULL, /* PlanInfo */ - parse->commandType, /* CmdType */ - NULL, /* SysInfo */ - NULL, /* ErrorInfo */ - INSTR_TIME_GET_MILLISEC(duration), /* totaltime */ - 0, /* rows */ - &bufusage, /* bufusage */ - &walusage, /* walusage */ - NULL, /* JumbleState */ - PGSS_PLAN); /* pgssStoreKind */ + pgss_store(parse->queryId, /* query id */ + query_string, /* query */ + parse->stmt_location, /* query location */ + parse->stmt_len, /* query length */ + NULL, /* PlanInfo */ + parse->commandType, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + INSTR_TIME_GET_MILLISEC(duration), /* totaltime */ + 0, /* rows */ + &bufusage, /* bufusage */ + &walusage, /* walusage */ + NULL, /* JumbleState */ + PGSS_PLAN); /* pgssStoreKind */ } else { @@ -930,7 +931,24 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, char *completionTag) #endif { - Node *parsetree = pstmt->utilityStmt; + Node *parsetree = pstmt->utilityStmt; + uint64 queryId = 0; + +#if PG_VERSION_NUM >= 140000 + queryId = pstmt->queryId; + + /* + * Force utility statements to get queryId zero. We do this even in cases + * where the statement contains an optimizable statement for which a + * queryId could be derived (such as EXPLAIN or DECLARE CURSOR). For such + * cases, runtime control will first go through ProcessUtility and then + * the executor, and we don't want the executor hooks to do anything, + * since we are already measuring the statement's costs at the utility + * level. + */ + if (PGSM_TRACK_UTILITY && !IsParallelWorker()) + pstmt->queryId = UINT64CONST(0); +#endif /* * If it's an EXECUTE statement, we don't track it and don't increment the @@ -946,10 +964,8 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, * * Likewise, we don't track execution of DEALLOCATE. 
*/ - if (PGSM_TRACK_UTILITY && - !IsA(parsetree, ExecuteStmt) && - !IsA(parsetree, PrepareStmt) && - !IsA(parsetree, DeallocateStmt) && !IsParallelWorker()) + if (PGSM_TRACK_UTILITY && PGSM_HANDLED_UTILITY(parsetree) && + !IsParallelWorker()) { instr_time start; instr_time duration; @@ -1012,7 +1028,16 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, INSTR_TIME_SUBTRACT(duration, start); #if PG_VERSION_NUM >= 130000 +#if PG_VERSION_NUM >= 140000 + rows = (qc && (qc->commandTag == CMDTAG_COPY || + qc->commandTag == CMDTAG_FETCH || + qc->commandTag == CMDTAG_SELECT || + qc->commandTag == CMDTAG_REFRESH_MATERIALIZED_VIEW)) + ? qc->nprocessed + : 0; +#else rows = (qc && qc->commandTag == CMDTAG_COPY) ? qc->nprocessed : 0; +#endif /* calc differences of WAL counters. */ memset(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start); @@ -1027,49 +1052,59 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, /* calc differences of buffer counters. 
*/ memset(&bufusage, 0, sizeof(BufferUsage)); BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start); - pgss_store_utility(queryString, /* query text */ - INSTR_TIME_GET_MILLISEC(duration), /* totaltime */ - rows, /* rows */ - &bufusage, /* bufusage */ - &walusage); /* walusage */ + pgss_store( + queryId, /* query ID */ + queryString, /* query text */ + pstmt->stmt_location, /* query location */ + pstmt->stmt_len, /* query length */ + NULL, /* PlanInfo */ + 0, /* CmdType */ + NULL, /* SysInfo */ + NULL, /* ErrorInfo */ + INSTR_TIME_GET_MILLISEC(duration), /* total_time */ + rows, /* rows */ + &bufusage, /* bufusage */ + &walusage, /* walusage */ + NULL, /* JumbleState */ + PGSS_FINISHED); /* pgssStoreKind */ } else { #if PG_VERSION_NUM >= 140000 - if (prev_ProcessUtility) - prev_ProcessUtility(pstmt, queryString, - readOnlyTree, - context, params, queryEnv, + if (prev_ProcessUtility) + prev_ProcessUtility(pstmt, queryString, + readOnlyTree, + context, params, queryEnv, + dest, + qc); + else + standard_ProcessUtility(pstmt, queryString, + readOnlyTree, + context, params, queryEnv, dest, qc); - else - standard_ProcessUtility(pstmt, queryString, - readOnlyTree, - context, params, queryEnv, - dest, - qc); #elif PG_VERSION_NUM >= 130000 - if (prev_ProcessUtility) - prev_ProcessUtility(pstmt, queryString, - context, params, queryEnv, + if (prev_ProcessUtility) + prev_ProcessUtility(pstmt, queryString, + context, params, queryEnv, + dest, + qc); + else + standard_ProcessUtility(pstmt, queryString, + context, params, queryEnv, dest, qc); - else - standard_ProcessUtility(pstmt, queryString, - context, params, queryEnv, - dest, - qc); #else - if (prev_ProcessUtility) - prev_ProcessUtility(pstmt, queryString, + if (prev_ProcessUtility) + prev_ProcessUtility(pstmt, queryString, + context, params, queryEnv, + dest, + completionTag); + else + standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); - else - 
standard_ProcessUtility(pstmt, queryString, - context, params, queryEnv, - dest, - completionTag); #endif } } @@ -1095,6 +1130,8 @@ BufferUsageAccumDiff(BufferUsage* bufusage, BufferUsage* pgBufferUsage, BufferUs INSTR_TIME_SUBTRACT(bufusage->blk_write_time, bufusage_start->blk_write_time); } #endif + +#if PG_VERSION_NUM < 140000 /* * Given an arbitrarily long query string, produce a hash for the purposes of * identifying the query, without normalizing constants. Used when hashing @@ -1106,6 +1143,7 @@ pgss_hash_string(const char *str, int len) return DatumGetUInt64(hash_any_extended((const unsigned char *) str, len, 0)); } +#endif static PgBackendStatus* pg_get_backend_status(void) @@ -1320,72 +1358,6 @@ pgss_update_entry(pgssEntry *entry, } } -static void -pgss_store_query(uint64 queryid, - const char * query, - CmdType cmd_type, - int query_location, - int query_len, -#if PG_VERSION_NUM > 130000 - JumbleState *jstate, -#else - JumbleState *jstate, -#endif - pgssStoreKind kind) -{ - char *norm_query = NULL; - - if (query_location >= 0) - { - Assert(query_location <= strlen(query)); - query += query_location; - /* Length of 0 (or -1) means "rest of string" */ - if (query_len <= 0) - query_len = strlen(query); - else - Assert(query_len <= strlen(query)); - } - else - { - /* If query location is unknown, distrust query_len as well */ - query_location = 0; - query_len = strlen(query); - } - - /* - * Discard leading and trailing whitespace, too. Use scanner_isspace() - * not libc's isspace(), because we want to match the lexer's behavior. - */ - while (query_len > 0 && scanner_isspace(query[0])) - query++, query_location++, query_len--; - while (query_len > 0 && scanner_isspace(query[query_len - 1])) - query_len--; - - if (jstate) - norm_query = generate_normalized_query(jstate, query, - query_location, - &query_len, - GetDatabaseEncoding()); - /* - * For utility statements, we just hash the query string to get an ID. 
- */ - if (queryid == UINT64CONST(0)) - queryid = pgss_hash_string(query, query_len); - - pgss_store(queryid, /* query id */ - PGSM_NORMALIZED_QUERY ? (norm_query ? norm_query : query) : query, /* query */ - NULL, /* PlanInfo */ - cmd_type, /* CmdType */ - NULL, /* SysInfo */ - NULL, /* ErrorInfo */ - 0, /* totaltime */ - 0, /* rows */ - NULL, /* bufusage */ - NULL, /* walusage */ - jstate, /* JumbleState */ - kind); /* pgssStoreKind */ -} - static void pgss_store_error(uint64 queryid, const char * query, @@ -1397,41 +1369,20 @@ pgss_store_error(uint64 queryid, snprintf(error_info.message, ERROR_MESSAGE_LEN, "%s", edata->message); snprintf(error_info.sqlcode, SQLCODE_LEN, "%s", unpack_sql_state(edata->sqlerrcode)); - pgss_store(queryid, /* query id */ - query, /* query text */ - NULL, /* PlanInfo */ - 0, /* CmdType */ - NULL, /* SysInfo */ - &error_info, /* ErrorInfo */ - 0, /* total_time */ - 0, /* rows */ - NULL, /* bufusage */ - NULL, /* walusage */ - NULL, /* JumbleState */ - PGSS_ERROR); /* pgssStoreKind */ -} - -static void -pgss_store_utility(const char *query, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage) -{ - uint64 queryid = pgss_hash_string(query, strlen(query)); - - pgss_store(queryid, /* query id */ - query, /* query text */ - NULL, /* PlanInfo */ - 0, /* CmdType */ - NULL, /* SysInfo */ - NULL, /* ErrorInfo */ - total_time, /* total_time */ - rows, /* rows */ - bufusage, /* bufusage */ - walusage, /* walusage */ - NULL, /* JumbleState */ - PGSS_FINISHED); /* pgssStoreKind */ + pgss_store(queryid, /* query id */ + query, /* query text */ + 0, /* query location */ + strlen(query), /* query length */ + NULL, /* PlanInfo */ + 0, /* CmdType */ + NULL, /* SysInfo */ + &error_info, /* ErrorInfo */ + 0, /* total_time */ + 0, /* rows */ + NULL, /* bufusage */ + NULL, /* walusage */ + NULL, /* JumbleState */ + PGSS_ERROR); /* pgssStoreKind */ } /* @@ -1446,17 +1397,19 @@ pgss_store_utility(const char *query, */ static void 
pgss_store(uint64 queryid, - const char *query, - PlanInfo *plan_info, - CmdType cmd_type, - SysInfo *sys_info, - ErrorInfo *error_info, - double total_time, - uint64 rows, - BufferUsage *bufusage, - WalUsage *walusage, - JumbleState *jstate, - pgssStoreKind kind) + const char *query, + int query_location, + int query_len, + PlanInfo *plan_info, + CmdType cmd_type, + SysInfo *sys_info, + ErrorInfo *error_info, + double total_time, + uint64 rows, + BufferUsage *bufusage, + WalUsage *walusage, + JumbleState *jstate, + pgssStoreKind kind) { HTAB *pgss_hash; pgssHashKey key; @@ -1471,6 +1424,7 @@ pgss_store(uint64 queryid, uint64 planid; uint64 appid; char comments[512] = ""; + char *norm_query = NULL; static bool found_app_name = false; static bool found_client_addr = false; static uint client_addr = 0; @@ -1483,6 +1437,34 @@ pgss_store(uint64 queryid, if (!IsSystemInitialized()) return; +#if PG_VERSION_NUM >= 140000 + /* + * Nothing to do if compute_query_id isn't enabled and no other module + * computed a query identifier. + */ + if (queryid == UINT64CONST(0)) + return; +#endif + + query = CleanQuerytext(query, &query_location, &query_len); + +#if PG_VERSION_NUM < 140000 + /* + * For utility statements, we just hash the query string to get an ID. + */ + if (queryid == UINT64CONST(0)) + { + queryid = pgss_hash_string(query, query_len); + /* + * If we are unlucky enough to get a hash of zero(invalid), use + * queryID as 2 instead, queryID 1 is already in use for normal + * statements. + */ + if (queryid == UINT64CONST(0)) + queryid = UINT64CONST(2); + } +#endif + Assert(query != NULL); if (kind == PGSS_ERROR) { @@ -1529,24 +1511,41 @@ pgss_store(uint64 queryid, if (!entry) { pgssQueryEntry *query_entry; - size_t query_len = 0; bool query_found = false; uint64 prev_qbuf_len = 0; HTAB *pgss_query_hash; pgss_query_hash = pgsm_get_query_hash(); + /* + * Create a new, normalized query string if caller asked. We don't + * need to hold the lock while doing this work. 
(Note: in any case, + * it's possible that someone else creates a duplicate hashtable entry + * in the interval where we don't hold the lock below. That case is + * handled by entry_alloc. + */ + if (jstate && PGSM_NORMALIZED_QUERY) + { + LWLockRelease(pgss->lock); + norm_query = generate_normalized_query(jstate, query, + query_location, + &query_len, + GetDatabaseEncoding()); + LWLockAcquire(pgss->lock, LW_SHARED); + } + query_entry = hash_search(pgss_query_hash, &queryid, HASH_ENTER_NULL, &query_found); if (query_entry == NULL) { LWLockRelease(pgss->lock); + if (norm_query) + pfree(norm_query); pgsm_log_error("pgss_store: out of memory (pgss_query_hash)."); return; } else if (!query_found) { - /* New query, must add it to the buffer, calculate its length. */ - query_len = strlen(query); + /* New query, truncate length if necessary. */ if (query_len > PGSM_QUERY_MAX_LEN) query_len = PGSM_QUERY_MAX_LEN; } @@ -1557,21 +1556,28 @@ pgss_store(uint64 queryid, if (!query_found) { - if (!SaveQueryText(bucketid, queryid, pgss_qbuf, query, query_len, &query_entry->query_pos)) + if (!SaveQueryText(bucketid, + queryid, + pgss_qbuf, + norm_query ? norm_query : query, + query_len, + &query_entry->query_pos)) { LWLockRelease(pgss->lock); + if (norm_query) + pfree(norm_query); pgsm_log_error("pgss_store: insufficient shared space for query."); return; } /* - * Save current query buffer length, if we fail to add a new - * new entry to the hash table then we must restore the - * original length. - */ + * Save current query buffer length, if we fail to add a new + * new entry to the hash table then we must restore the + * original length. 
+ */ memcpy(&prev_qbuf_len, pgss_qbuf, sizeof(prev_qbuf_len)); } - /* OK to create a new hashtable entry */ + /* OK to create a new hashtable entry */ entry = hash_entry_alloc(pgss, &key, GetDatabaseEncoding()); if (entry == NULL) { @@ -1581,7 +1587,8 @@ pgss_store(uint64 queryid, memcpy(pgss_qbuf, &prev_qbuf_len, sizeof(prev_qbuf_len)); } LWLockRelease(pgss->lock); - elog(DEBUG1, "pg_stat_monitor: out of memory"); + if (norm_query) + pfree(norm_query); return; } entry->query_pos = query_entry->query_pos; @@ -1607,6 +1614,8 @@ pgss_store(uint64 queryid, application_name_len); LWLockRelease(pgss->lock); + if (norm_query) + pfree(norm_query); } /* * Reset all statement statistics. @@ -2781,6 +2790,45 @@ RecordConstLocation(JumbleState *jstate, int location) jstate->clocations_count++; } } + +static const char * +CleanQuerytext(const char *query, int *location, int *len) +{ + int query_location = *location; + int query_len = *len; + + /* First apply starting offset, unless it's -1 (unknown). */ + if (query_location >= 0) + { + Assert(query_location <= strlen(query)); + query += query_location; + /* Length of 0 (or -1) means "rest of string" */ + if (query_len <= 0) + query_len = strlen(query); + else + Assert(query_len <= strlen(query)); + } + else + { + /* If query location is unknown, distrust query_len as well */ + query_location = 0; + query_len = strlen(query); + } + + /* + * Discard leading and trailing whitespace, too. Use scanner_isspace() + * not libc's isspace(), because we want to match the lexer's behavior. + */ + while (query_len > 0 && scanner_isspace(query[0])) + query++, query_location++, query_len--; + while (query_len > 0 && scanner_isspace(query[query_len - 1])) + query_len--; + + *location = query_location; + *len = query_len; + + return query; +} #endif /* From 82031ed52c0973cd1f1efe69a6706d7a33e878d6 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 6 Dec 2021 10:13:31 -0300 Subject: [PATCH 27/39] PG-286: Update regression tests. 
As the query normalization and query cleaning is always done in the right place (pgss_store), no more parsed queries have a trailling comma ';' at the end. Also, on error regression test, after fixing some problems with utility related queries, we now have two entries for the RAISE WARNING case, the first entry is the utility query itself, the second entry is the error message logged by emit_log_hook. Some queries have the order adjusted due to the fix introduced by the previous commits. --- regression/expected/application_name.out | 10 ++-- regression/expected/basic.out | 8 ++-- regression/expected/cmd_type.out | 26 +++++----- regression/expected/counters.out | 16 +++---- regression/expected/database.out | 12 ++--- regression/expected/error.out | 28 ++++++----- regression/expected/error_1.out | 28 ++++++----- regression/expected/relations.out | 60 ++++++++++++------------ regression/expected/rows.out | 16 +++---- regression/expected/rows_1.out | 16 +++---- regression/expected/state.out | 12 ++--- regression/expected/tags.out | 4 +- regression/expected/top_query.out | 34 +++++++------- regression/expected/top_query_1.out | 34 +++++++------- 14 files changed, 156 insertions(+), 148 deletions(-) diff --git a/regression/expected/application_name.out b/regression/expected/application_name.out index 9d40e02..e200c18 100644 --- a/regression/expected/application_name.out +++ b/regression/expected/application_name.out @@ -12,11 +12,11 @@ SELECT 1 AS num; (1 row) SELECT query,application_name FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | application_name ---------------------------------------------------------------------------------+----------------------------- - SELECT $1 AS num | pg_regress/application_name - SELECT pg_stat_monitor_reset(); | pg_regress/application_name - SELECT query,application_name FROM pg_stat_monitor ORDER BY query COLLATE "C"; | pg_regress/application_name + query | application_name 
+-------------------------------------------------------------------------------+----------------------------- + SELECT $1 AS num | pg_regress/application_name + SELECT pg_stat_monitor_reset() | pg_regress/application_name + SELECT query,application_name FROM pg_stat_monitor ORDER BY query COLLATE "C" | pg_regress/application_name (3 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/basic.out b/regression/expected/basic.out index 04b5eb8..adea370 100644 --- a/regression/expected/basic.out +++ b/regression/expected/basic.out @@ -12,11 +12,11 @@ SELECT 1 AS num; (1 row) SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query ---------------------------------------------------------------- + query +-------------------------------------------------------------- SELECT $1 AS num - SELECT pg_stat_monitor_reset(); - SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C"; + SELECT pg_stat_monitor_reset() + SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C" (3 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/cmd_type.out b/regression/expected/cmd_type.out index aba4be0..086d230 100644 --- a/regression/expected/cmd_type.out +++ b/regression/expected/cmd_type.out @@ -24,19 +24,19 @@ SELECT b FROM t2 FOR UPDATE; TRUNCATE t1; DROP TABLE t1; SELECT query, cmd_type, cmd_type_text FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | cmd_type | cmd_type_text ------------------------------------------------------------------------------------------+----------+--------------- - CREATE TABLE t1 (a INTEGER); | 0 | - CREATE TABLE t2 (b INTEGER); | 0 | - DELETE FROM t1; | 4 | DELETE - DROP TABLE t1; | 0 | - INSERT INTO t1 VALUES($1) | 3 | INSERT - SELECT a FROM t1; | 1 | SELECT - SELECT b FROM t2 FOR UPDATE; | 1 | SELECT - SELECT pg_stat_monitor_reset(); | 1 | SELECT - SELECT query, cmd_type, cmd_type_text FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 | SELECT - TRUNCATE t1; | 0 | - UPDATE t1 SET a = $1 
| 2 | UPDATE + query | cmd_type | cmd_type_text +----------------------------------------------------------------------------------------+----------+--------------- + CREATE TABLE t1 (a INTEGER) | 0 | + CREATE TABLE t2 (b INTEGER) | 0 | + DELETE FROM t1 | 4 | DELETE + DROP TABLE t1 | 0 | + INSERT INTO t1 VALUES($1) | 3 | INSERT + SELECT a FROM t1 | 1 | SELECT + SELECT b FROM t2 FOR UPDATE | 1 | SELECT + SELECT pg_stat_monitor_reset() | 1 | SELECT + SELECT query, cmd_type, cmd_type_text FROM pg_stat_monitor ORDER BY query COLLATE "C" | 1 | SELECT + TRUNCATE t1 | 0 | + UPDATE t1 SET a = $1 | 2 | UPDATE (11 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/counters.out b/regression/expected/counters.out index ebf26f7..40fed48 100644 --- a/regression/expected/counters.out +++ b/regression/expected/counters.out @@ -36,11 +36,11 @@ SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; (0 rows) SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | calls -----------------------------------------------------------------------------------+------- - SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; | 4 - SELECT pg_stat_monitor_reset(); | 1 - SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 + query | calls +---------------------------------------------------------------------------------+------- + SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 4 + SELECT pg_stat_monitor_reset() | 1 + SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C" | 1 (3 rows) SELECT pg_stat_monitor_reset(); @@ -69,8 +69,8 @@ SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; query | calls ---------------------------------------------------------------------------------------------------+------- SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 1000 - SELECT pg_stat_monitor_reset(); | 
1 - SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 + SELECT pg_stat_monitor_reset() | 1 + SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C" | 1 do $$ +| 1 declare +| n integer:= 1; +| @@ -80,7 +80,7 @@ SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; exit when n = 1000; +| n := n + 1; +| end loop; +| - end $$; | + end $$ | (4 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/database.out b/regression/expected/database.out index 378cbea..12c5055 100644 --- a/regression/expected/database.out +++ b/regression/expected/database.out @@ -28,12 +28,12 @@ SELECT * FROM t3,t4 WHERE t3.c = t4.d; \c contrib_regression SELECT datname, query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - datname | query ---------------------+------------------------------------------------------------------------ - db1 | SELECT * FROM t1,t2 WHERE t1.a = t2.b; - db2 | SELECT * FROM t3,t4 WHERE t3.c = t4.d; - contrib_regression | SELECT datname, query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - contrib_regression | SELECT pg_stat_monitor_reset(); + datname | query +--------------------+----------------------------------------------------------------------- + db1 | SELECT * FROM t1,t2 WHERE t1.a = t2.b + db2 | SELECT * FROM t3,t4 WHERE t3.c = t4.d + contrib_regression | SELECT datname, query FROM pg_stat_monitor ORDER BY query COLLATE "C" + contrib_regression | SELECT pg_stat_monitor_reset() (4 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/error.out b/regression/expected/error.out index ccf7968..2e3d8fa 100644 --- a/regression/expected/error.out +++ b/regression/expected/error.out @@ -21,18 +21,22 @@ RAISE WARNING 'warning message'; END $$; WARNING: warning message SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; - query | elevel | sqlcode | message 
-------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 20 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | -(6 rows) + query | elevel | sqlcode | message +-----------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 20 | 22012 | division by zero + SELECT pg_stat_monitor_reset() | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel | 0 | | + do $$ +| 0 | | + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$ | | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | +(7 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset diff --git a/regression/expected/error_1.out b/regression/expected/error_1.out index 102a92a..7a61786 100644 --- a/regression/expected/error_1.out +++ b/regression/expected/error_1.out @@ -21,18 +21,22 @@ RAISE WARNING 'warning message'; END $$; WARNING: warning message SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; - query | elevel | sqlcode | message -------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 21 | 
42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 21 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | -(6 rows) + query | elevel | sqlcode | message +-----------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 21 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 21 | 22012 | division by zero + SELECT pg_stat_monitor_reset() | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel | 0 | | + do $$ +| 0 | | + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$ | | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | +(7 rows) SELECT pg_stat_monitor_reset(); pg_stat_monitor_reset diff --git a/regression/expected/relations.out b/regression/expected/relations.out index 9ff9288..6c17a47 100644 --- a/regression/expected/relations.out +++ b/regression/expected/relations.out @@ -37,14 +37,14 @@ SELECT * FROM foo1, foo2, foo3, foo4; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; - query | relations ---------------------------------------------------------------------------+--------------------------------------------------- - SELECT * FROM foo1, foo2, foo3, foo4; | {public.foo1,public.foo2,public.foo3,public.foo4} - SELECT * FROM foo1, foo2, foo3; | {public.foo1,public.foo2,public.foo3} - SELECT * FROM foo1, foo2; | {public.foo1,public.foo2} - SELECT * FROM foo1; | {public.foo1} - SELECT pg_stat_monitor_reset(); 
| - SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; | {public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------------------+--------------------------------------------------- + SELECT * FROM foo1 | {public.foo1} + SELECT * FROM foo1, foo2 | {public.foo1,public.foo2} + SELECT * FROM foo1, foo2, foo3 | {public.foo1,public.foo2,public.foo3} + SELECT * FROM foo1, foo2, foo3, foo4 | {public.foo1,public.foo2,public.foo3,public.foo4} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C" | {public.pg_stat_monitor*,pg_catalog.pg_database} (6 rows) SELECT pg_stat_monitor_reset(); @@ -89,14 +89,14 @@ SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; - query | relations ---------------------------------------------------------------------------+-------------------------------------------------- - SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4; | {sch1.foo1,sch2.foo2,sch3.foo3,sch4.foo4} - SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3; | {sch1.foo1,sch2.foo2,sch3.foo3} - SELECT * FROM sch1.foo1, sch2.foo2; | {sch1.foo1,sch2.foo2} - SELECT * FROM sch1.foo1; | {sch1.foo1} - SELECT pg_stat_monitor_reset(); | - SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; | {public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------------------+-------------------------------------------------- + SELECT * FROM sch1.foo1 | {sch1.foo1} + SELECT * FROM sch1.foo1, sch2.foo2 | {sch1.foo1,sch2.foo2} + SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3 | {sch1.foo1,sch2.foo2,sch3.foo3} + SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4 | {sch1.foo1,sch2.foo2,sch3.foo3,sch4.foo4} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from 
pg_stat_monitor ORDER BY query collate "C" | {public.pg_stat_monitor*,pg_catalog.pg_database} (6 rows) SELECT pg_stat_monitor_reset(); @@ -122,12 +122,12 @@ SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query; - query | relations ---------------------------------------------------------------+-------------------------------------------------- - SELECT * FROM sch1.foo1, foo1; | {sch1.foo1,public.foo1} - SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2; | {sch1.foo1,sch2.foo2,public.foo1,public.foo2} - SELECT pg_stat_monitor_reset(); | - SELECT query, relations from pg_stat_monitor ORDER BY query; | {public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------+-------------------------------------------------- + SELECT * FROM sch1.foo1, foo1 | {sch1.foo1,public.foo1} + SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2 | {sch1.foo1,sch2.foo2,public.foo1,public.foo2} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from pg_stat_monitor ORDER BY query | {public.pg_stat_monitor*,pg_catalog.pg_database} (4 rows) SELECT pg_stat_monitor_reset(); @@ -168,14 +168,14 @@ SELECT * FROM v1,v2,v3,v4; (0 rows) SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; - query | relations ---------------------------------------------------------------------------+----------------------------------------------------------------------------------------------- - SELECT * FROM v1,v2,v3,v4; | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3,public.v4*,public.foo4} - SELECT * FROM v1,v2,v3; | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3} - SELECT * FROM v1,v2; | {public.v1*,public.foo1,public.v2*,public.foo2} - SELECT * FROM v1; | {public.v1*,public.foo1} - SELECT pg_stat_monitor_reset(); | - SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; | 
{public.pg_stat_monitor*,pg_catalog.pg_database} + query | relations +-------------------------------------------------------------------------+----------------------------------------------------------------------------------------------- + SELECT * FROM v1 | {public.v1*,public.foo1} + SELECT * FROM v1,v2 | {public.v1*,public.foo1,public.v2*,public.foo2} + SELECT * FROM v1,v2,v3 | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3} + SELECT * FROM v1,v2,v3,v4 | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3,public.v4*,public.foo4} + SELECT pg_stat_monitor_reset() | + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C" | {public.pg_stat_monitor*,pg_catalog.pg_database} (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/rows.out b/regression/expected/rows.out index 5a0a0df..c68a9d8 100644 --- a/regression/expected/rows.out +++ b/regression/expected/rows.out @@ -8541,14 +8541,14 @@ SELECt * FROM t2 WHERE b % 2 = 0; (2500 rows) SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | rows_retrieved --------------------------------------------------------------------------------+---------------- - SELECT * FROM t1 LIMIT $1 | 10 - SELECT * FROM t1; | 1000 - SELECT * FROM t2; | 5000 - SELECT pg_stat_monitor_reset(); | 1 - SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 - SELECt * FROM t2 WHERE b % $1 = $2 | 2500 + query | rows_retrieved +------------------------------------------------------------------------------+---------------- + SELECT * FROM t1 | 1000 + SELECT * FROM t1 LIMIT $1 | 10 + SELECT * FROM t2 | 5000 + SELECT pg_stat_monitor_reset() | 1 + SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C" | 0 + SELECt * FROM t2 WHERE b % $1 = $2 | 2500 (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/rows_1.out b/regression/expected/rows_1.out index d6b3c09..c43d41a 100644 
--- a/regression/expected/rows_1.out +++ b/regression/expected/rows_1.out @@ -8541,14 +8541,14 @@ SELECt * FROM t2 WHERE b % 2 = 0; (2500 rows) SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | rows_retrieved --------------------------------------------------------------------------------+---------------- - SELECT * FROM t1 LIMIT $1 | 10 - SELECT * FROM t1; | 1000 - SELECT b FROM t2 FOR UPDATE; | 5000 - SELECT pg_stat_monitor_reset(); | 1 - SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 - SELECt * FROM t2 WHERE b % $1 = $2 | 2500 + query | rows_retrieved +------------------------------------------------------------------------------+---------------- + SELECT * FROM t1 | 1000 + SELECT * FROM t1 LIMIT $1 | 10 + SELECT b FROM t2 FOR UPDATE | 5000 + SELECT pg_stat_monitor_reset() | 1 + SELECT query, rows_retrieved FROM pg_stat_monitor ORDER BY query COLLATE "C" | 0 + SELECt * FROM t2 WHERE b % $1 = $2 | 2500 (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/state.out b/regression/expected/state.out index b051091..bac22d1 100644 --- a/regression/expected/state.out +++ b/regression/expected/state.out @@ -14,12 +14,12 @@ SELECT 1; SELECT 1/0; -- divide by zero ERROR: division by zero SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | state_code | state -----------------------------------------------------------------------------------+------------+--------------------- - SELECT $1 | 3 | FINISHED - SELECT 1/0; | 4 | FINISHED WITH ERROR - SELECT pg_stat_monitor_reset(); | 3 | FINISHED - SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 2 | ACTIVE + query | state_code | state +---------------------------------------------------------------------------------+------------+--------------------- + SELECT $1 | 3 | FINISHED + SELECT 1/0; | 4 | FINISHED WITH ERROR + SELECT pg_stat_monitor_reset() | 3 | FINISHED + 
SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C" | 2 | ACTIVE (4 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/tags.out b/regression/expected/tags.out index b2cda90..27d37d3 100644 --- a/regression/expected/tags.out +++ b/regression/expected/tags.out @@ -15,8 +15,8 @@ SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; query | comments ---------------------------------------------------------------------------+---------------------------------------------------------- SELECT $1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */ | /* { "application", psql_app, "real_ip", 192.168.1.3) */ - SELECT pg_stat_monitor_reset(); | - SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; | + SELECT pg_stat_monitor_reset() | + SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C" | (3 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/top_query.out b/regression/expected/top_query.out index 5d810f8..42d4f3b 100644 --- a/regression/expected/top_query.out +++ b/regression/expected/top_query.out @@ -23,23 +23,23 @@ SELECT add2(1,2); (1 row) SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | top_query ---------------------------------------------------------------------------+-------------------- - CREATE OR REPLACE FUNCTION add(int, int) RETURNS INTEGER AS +| - $$ +| - BEGIN +| - return (select $1 + $2); +| - END; $$ language plpgsql; | - CREATE OR REPLACE function add2(int, int) RETURNS int as +| - $$ +| - BEGIN +| - return add($1,$2); +| - END; +| - $$ language plpgsql; | - SELECT (select $1 + $2) | SELECT add2($1,$2) - SELECT add2($1,$2) | - SELECT pg_stat_monitor_reset(); | - SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C"; | + query | top_query +-------------------------------------------------------------------------+-------------------- + CREATE OR REPLACE FUNCTION add(int, int) 
RETURNS INTEGER AS +| + $$ +| + BEGIN +| + return (select $1 + $2); +| + END; $$ language plpgsql | + CREATE OR REPLACE function add2(int, int) RETURNS int as +| + $$ +| + BEGIN +| + return add($1,$2); +| + END; +| + $$ language plpgsql | + SELECT (select $1 + $2) | SELECT add2($1,$2) + SELECT add2($1,$2) | + SELECT pg_stat_monitor_reset() | + SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C" | (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/top_query_1.out b/regression/expected/top_query_1.out index ca738e5..b8e71a8 100644 --- a/regression/expected/top_query_1.out +++ b/regression/expected/top_query_1.out @@ -23,23 +23,23 @@ SELECT add2(1,2); (1 row) SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | top_query ---------------------------------------------------------------------------+-------------------- - (select $1 + $2) | SELECT add2($1,$2) - CREATE OR REPLACE FUNCTION add(int, int) RETURNS INTEGER AS +| - $$ +| - BEGIN +| - return (select $1 + $2); +| - END; $$ language plpgsql; | - CREATE OR REPLACE function add2(int, int) RETURNS int as +| - $$ +| - BEGIN +| - return add($1,$2); +| - END; +| - $$ language plpgsql; | - SELECT add2($1,$2) | - SELECT pg_stat_monitor_reset(); | - SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C"; | + query | top_query +-------------------------------------------------------------------------+-------------------- + (select $1 + $2) | SELECT add2($1,$2) + CREATE OR REPLACE FUNCTION add(int, int) RETURNS INTEGER AS +| + $$ +| + BEGIN +| + return (select $1 + $2); +| + END; $$ language plpgsql | + CREATE OR REPLACE function add2(int, int) RETURNS int as +| + $$ +| + BEGIN +| + return add($1,$2); +| + END; +| + $$ language plpgsql | + SELECT add2($1,$2) | + SELECT pg_stat_monitor_reset() | + SELECT query, top_query FROM pg_stat_monitor ORDER BY query COLLATE "C" | (6 rows) SELECT pg_stat_monitor_reset(); From 
fb4f63202731c112e3f95161874d5375404981b2 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 6 Dec 2021 13:24:22 -0300 Subject: [PATCH 28/39] PG-286: Fix deadlock. Can't call elog() function from inside the pgsm_log as the pgss_hash lock could be already acquired in exclusive mode, since elog() triggers the psmg_emit_log hook, when it calls pgss_store it will try to acquire the pgss_hash lock again, leading to a deadlock. --- pgsm_errors.c | 223 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) create mode 100644 pgsm_errors.c diff --git a/pgsm_errors.c b/pgsm_errors.c new file mode 100644 index 0000000..878712a --- /dev/null +++ b/pgsm_errors.c @@ -0,0 +1,223 @@ +/*------------------------------------------------------------------------- + * + * pgsm_errors.c + * Track pg_stat_monitor internal error messages. + * + * Copyright © 2021, Percona LLC and/or its affiliates + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * + * Portions Copyright (c) 1994, The Regents of the University of California + * + * IDENTIFICATION + * contrib/pg_stat_monitor/pgsm_errors.c + * + *------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pg_stat_monitor.h" +#include "pgsm_errors.h" + + +PG_FUNCTION_INFO_V1(pg_stat_monitor_errors); +PG_FUNCTION_INFO_V1(pg_stat_monitor_reset_errors); + + +/* + * Maximum number of error messages tracked. + * This should be set to a sensible value in order to track + * the different type of errors that pg_stat_monitor may + * report, e.g. out of memory. 
+ */ +#define PSGM_ERRORS_MAX 128 + +static HTAB *pgsm_errors_ht = NULL; + +void psgm_errors_init(void) +{ + HASHCTL info; +#if PG_VERSION_NUM >= 140000 + int flags = HASH_ELEM | HASH_STRINGS; +#else + int flags = HASH_ELEM | HASH_BLOBS; +#endif + + + memset(&info, 0, sizeof(info)); + info.keysize = ERROR_MSG_MAX_LEN; + info.entrysize = sizeof(ErrorEntry); + pgsm_errors_ht = ShmemInitHash("pg_stat_monitor: errors hashtable", + PSGM_ERRORS_MAX, /* initial size */ + PSGM_ERRORS_MAX, /* maximum size */ + &info, + flags); +} + +size_t pgsm_errors_size(void) +{ + return hash_estimate_size(PSGM_ERRORS_MAX, sizeof(ErrorEntry)); +} + +void pgsm_log(PgsmLogSeverity severity, const char *format, ...) +{ + char key[ERROR_MSG_MAX_LEN]; + ErrorEntry *entry; + bool found = false; + va_list ap; + int n; + struct timeval tv; + struct tm *lt; + pgssSharedState *pgss; + + va_start(ap, format); + n = vsnprintf(key, ERROR_MSG_MAX_LEN, format, ap); + va_end(ap); + + if (n < 0) + return; + + pgss = pgsm_get_ss(); + LWLockAcquire(pgss->errors_lock, LW_EXCLUSIVE); + + entry = (ErrorEntry *) hash_search(pgsm_errors_ht, key, HASH_ENTER_NULL, &found); + if (!entry) + { + LWLockRelease(pgss->errors_lock); + /* + * We're out of memory, can't track this error message. + */ + return; + } + + if (!found) + { + entry->severity = severity; + entry->calls = 0; + } + + /* Update message timestamp. */ + gettimeofday(&tv, NULL); + lt = localtime(&tv.tv_sec); + snprintf(entry->time, sizeof(entry->time), + "%04d-%02d-%02d %02d:%02d:%02d", + lt->tm_year + 1900, + lt->tm_mon + 1, + lt->tm_mday, + lt->tm_hour, + lt->tm_min, + lt->tm_sec); + + entry->calls++; + + LWLockRelease(pgss->errors_lock); +} + +/* + * Clear all entries from the hash table. + */ +Datum +pg_stat_monitor_reset_errors(PG_FUNCTION_ARGS) +{ + HASH_SEQ_STATUS hash_seq; + ErrorEntry *entry; + pgssSharedState *pgss = pgsm_get_ss(); + + /* Safety check... 
*/ + if (!IsSystemInitialized()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); + + LWLockAcquire(pgss->errors_lock, LW_EXCLUSIVE); + + hash_seq_init(&hash_seq, pgsm_errors_ht); + while ((entry = hash_seq_search(&hash_seq)) != NULL) + entry = hash_search(pgsm_errors_ht, &entry->message, HASH_REMOVE, NULL); + + LWLockRelease(pgss->errors_lock); + PG_RETURN_VOID(); +} + +/* + * Invoked when users query the view pg_stat_monitor_errors. + * This function creates tuples with error messages from data present in + * the hash table, then return the dataset to the caller. + */ +Datum +pg_stat_monitor_errors(PG_FUNCTION_ARGS) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate *tupstore; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + HASH_SEQ_STATUS hash_seq; + ErrorEntry *error_entry; + pgssSharedState *pgss = pgsm_get_ss(); + + /* Safety check... 
*/ + if (!IsSystemInitialized()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("pg_stat_monitor: set-valued function called in context that cannot accept a set"))); + + /* Switch into long-lived context to construct returned data structures */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "pg_stat_monitor: return type must be a row type"); + + if (tupdesc->natts != 4) + elog(ERROR, "pg_stat_monitor: incorrect number of output arguments, required 3, found %d", tupdesc->natts); + + tupstore = tuplestore_begin_heap(true, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + MemoryContextSwitchTo(oldcontext); + + LWLockAcquire(pgss->errors_lock, LW_SHARED); + + hash_seq_init(&hash_seq, pgsm_errors_ht); + while ((error_entry = hash_seq_search(&hash_seq)) != NULL) + { + Datum values[4]; + bool nulls[4]; + int i = 0; + memset(values, 0, sizeof(values)); + memset(nulls, 0, sizeof(nulls)); + + values[i++] = Int64GetDatumFast(error_entry->severity); + values[i++] = CStringGetTextDatum(error_entry->message); + values[i++] = CStringGetTextDatum(error_entry->time); + values[i++] = Int64GetDatumFast(error_entry->calls); + + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + + LWLockRelease(pgss->errors_lock); + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); + + return (Datum)0; +} \ No newline at end of file From 1881fd737b5a4b788b7872b6a491b163b796eb9a Mon Sep 17 00:00:00 2001 
From: Diego Fronza Date: Wed, 22 Dec 2021 12:46:58 -0300 Subject: [PATCH 29/39] PG-291: Fix query call count. The issue with wrong query call count was taking place during transition to a new bucket, the process is shortly describe bellow: 1. Scan for pending queries in previous bucket. 2. Add pending queries to the new bucket id. 3. Remove pending queries from previous bucket id. The problem is that when switching to a new bucket, we reset query statistics for a given entry being processed, so, for example, if the pending query had a call count of 10 (9 of which were finished, 10th is the pending one), if we move this query to the new bucket, the entry will have its stats reseted, clearing the query call count to zero. To solve the problem, whenever a pending query is detected, if the entry has a call count > 1, we mark it as finished, and don't remove it from the previous bucket in order to keep its statistics, then we move just the pending query (10th in the example) to the new bucket id. Another issue is that when moving a entry to a new bucket, we missed copying the query position from the previous entry, which is used to locate the query text in the query buffer: hash_entry_dealloc():291 new_entry->query_pos = old_entry->query_pos; --- hash_query.c | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/hash_query.c b/hash_query.c index 4006544..809c829 100644 --- a/hash_query.c +++ b/hash_query.c @@ -216,9 +216,21 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu pgssEntry *bkp_entry = malloc(sizeof(pgssEntry)); if (!bkp_entry) { - /* No memory, remove pending query entry from the previous bucket. 
*/ - elog(ERROR, "hash_entry_dealloc: out of memory"); - entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + pgsm_log_error("hash_entry_dealloc: out of memory"); + /* + * No memory, If the entry has calls > 1 then we change the state to finished, + * as the pending query will likely finish execution during the new bucket + * time window. The pending query will vanish in this case, can't list it + * until it completes. + * + * If there is only one call to the query and it's pending, remove the + * entry from the previous bucket and allow it to finish in the new bucket, + * in order to avoid the query living in the old bucket forever. + */ + if (entry->counters.calls.calls > 1) + entry->counters.state = PGSS_FINISHED; + else + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); continue; } @@ -231,8 +243,20 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu /* Add the entry to a list of nodes to be processed later. */ pending_entries = lappend(pending_entries, bkp_entry); - /* Finally remove the pending query from the expired bucket id. */ - entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + /* + * If the entry has calls > 1 then we change the state to finished in + * the previous bucket, as the pending query will likely finish execution + * during the new bucket time window. Can't remove it from the previous bucket + * as it may have many calls and we would lose the query statistics. + * + * If there is only one call to the query and it's pending, remove the entry + * from the previous bucket and allow it to finish in the new bucket, + * in order to avoid the query living in the old bucket forever. 
+ */ + if (entry->counters.calls.calls > 1) + entry->counters.state = PGSS_FINISHED; + else + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } } } @@ -255,6 +279,7 @@ hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_bu new_entry->counters = old_entry->counters; SpinLockInit(&new_entry->mutex); new_entry->encoding = old_entry->encoding; + new_entry->query_pos = old_entry->query_pos; } free(old_entry); From e079e65da0a7fae66b932753939e84763750fa77 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Mon, 3 Jan 2022 11:01:01 -0300 Subject: [PATCH 30/39] PG-299: Fix conflicts between devel and master. Updated sql files (pg_stat_monitor_settings view). Using right variable name and level checking on pgss_store: key.toplevel = ((exec_nested_level + plan_nested_level) == 0); --- pg_stat_monitor--1.0.13.sql.in | 8 +++++--- pg_stat_monitor--1.0.14.sql.in | 8 +++++--- pg_stat_monitor.c | 2 +- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pg_stat_monitor--1.0.13.sql.in b/pg_stat_monitor--1.0.13.sql.in index 990e62b..662e9a6 100644 --- a/pg_stat_monitor--1.0.13.sql.in +++ b/pg_stat_monitor--1.0.13.sql.in @@ -119,12 +119,13 @@ LANGUAGE SQL PARALLEL SAFE; CREATE FUNCTION pg_stat_monitor_settings( OUT name text, - OUT value INTEGER, - OUT default_value INTEGER, + OUT value text, + OUT default_value text, OUT description text, OUT minimum INTEGER, OUT maximum INTEGER, - OUT restart INTEGER + OUT options text, + OUT restart text ) RETURNS SETOF record AS 'MODULE_PATHNAME', 'pg_stat_monitor_settings' @@ -137,6 +138,7 @@ CREATE VIEW pg_stat_monitor_settings AS SELECT description, minimum, maximum, + options, restart FROM pg_stat_monitor_settings(); diff --git a/pg_stat_monitor--1.0.14.sql.in b/pg_stat_monitor--1.0.14.sql.in index dae64b8..f9cc22f 100644 --- a/pg_stat_monitor--1.0.14.sql.in +++ b/pg_stat_monitor--1.0.14.sql.in @@ -119,12 +119,13 @@ LANGUAGE SQL PARALLEL SAFE; CREATE FUNCTION pg_stat_monitor_settings( OUT name 
text, - OUT value INTEGER, - OUT default_value INTEGER, + OUT value text, + OUT default_value text, OUT description text, OUT minimum INTEGER, OUT maximum INTEGER, - OUT restart INTEGER + OUT options text, + OUT restart text ) RETURNS SETOF record AS 'MODULE_PATHNAME', 'pg_stat_monitor_settings' @@ -137,6 +138,7 @@ CREATE VIEW pg_stat_monitor_settings AS SELECT description, minimum, maximum, + options, restart FROM pg_stat_monitor_settings(); diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 42286a4..0ea38bc 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1501,7 +1501,7 @@ pgss_store(uint64 queryid, #if PG_VERSION_NUM < 140000 key.toplevel = 1; #else - key.toplevel = (nested_level == 0); + key.toplevel = ((exec_nested_level + plan_nested_level) == 0); #endif pgss_hash = pgsm_get_hash(); From 1d41f62294c24d51dd93f6444d7b7572312856ae Mon Sep 17 00:00:00 2001 From: Vadim Yalovets Date: Fri, 7 Jan 2022 14:45:29 +0200 Subject: [PATCH 31/39] DISTPG-353 PG Debian Requirement: the Description is overflowing --- debian/control | 6 +++--- debian/control.in | 14 +++++++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/debian/control b/debian/control index 8e77a49..c4350a8 100644 --- a/debian/control +++ b/debian/control @@ -4,12 +4,12 @@ Priority: optional Maintainer: Percona Development Team Build-Depends: debhelper (>= 9), - postgresql-server-dev-all (>= 153~), + postgresql-server-dev-all (>= 153~) | percona-postgresql-server-dev-all (>= 153~), -Package: percona-pg-stat-monitor@@PG_REL@@ +Package: percona-pg-stat-monitorPGVERSION Architecture: any Depends: - postgresql-@@PG_REL@@, + postgresql-PGVERSION, ${misc:Depends}, ${shlibs:Depends}, Description: enhancement query planning and execution statistics collector diff --git a/debian/control.in b/debian/control.in index 66a62c4..c4350a8 100644 --- a/debian/control.in +++ b/debian/control.in @@ -12,9 +12,13 @@ Depends: postgresql-PGVERSION, ${misc:Depends}, ${shlibs:Depends}, 
-Description: The pg_stat_monitor is statistics collector tool - based on PostgreSQL's contrib module "pg_stat_statements". +Description: enhancement query planning and execution statistics collector + The pg_stat_monitor is a Query Performance Monitoring tool for PostgreSQL. + It attempts to provide a more holistic picture by providing much-needed query + performance insights in a single view. . - pg_stat_monitor is developed on the basis of pg_stat_statments - as more advanced replacement for pg_stat_statment. - It provides all the features of pg_stat_statment plus its own feature set. + pg_stat_monitor provides improved insights that allow database users to + understand query origins, execution, planning statistics and details, query + information, and metadata. This significantly improves observability, enabling + users to debug and tune query performance. pg_stat_monitor is developed on the + basis of pg_stat_statements as its more advanced replacement. From 24ae3fa66f0238e5501436360b3f928c30a4f5a8 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Thu, 23 Dec 2021 11:15:38 -0300 Subject: [PATCH 32/39] PG-296: Fix application name. If a backend would change the application name during execution, pg_stat_monitor would then fail to read the updated value, as it caches the result in order to avoid calling the expensive functions pgstat_fetch_stat_numbackends() and pgstat_fetch_stat_local_beentry(). A workaround was found, we can just read an exported GUC from PostgreSQL backend itself, namely application_name, from utils/guc.h, thus saving us from having to call those expensive functions. 
--- pg_stat_monitor.c | 72 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 51 insertions(+), 21 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 0ea38bc..5938dbe 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -17,6 +17,7 @@ #include "postgres.h" #include "access/parallel.h" +#include "utils/guc.h" #include #ifdef BENCHMARK #include /* clock() */ @@ -199,6 +200,8 @@ static uint64 get_query_id(JumbleState *jstate, Query *query); /* Daniel J. Bernstein's hash algorithm: see http://www.cse.yorku.ca/~oz/hash.html */ static uint64 djb2_hash(unsigned char *str, size_t len); +/* Same as above, but stores the calculated string length into *out_len (small optimization) */ +static uint64 djb2_hash_str(unsigned char *str, int *out_len); /* * Module load callback */ @@ -1172,8 +1175,6 @@ static int pg_get_application_name(char *application_name, bool *ok) { PgBackendStatus *beentry = pg_get_backend_status(); - if (!beentry) - return snprintf(application_name, APPLICATIONNAME_LEN, "%s", "postmaster"); if (!beentry) return snprintf(application_name, APPLICATIONNAME_LEN, "%s", "unknown"); @@ -1412,22 +1413,23 @@ pgss_store(uint64 queryid, pgssStoreKind kind) { HTAB *pgss_hash; - pgssHashKey key; + pgssHashKey key; pgssEntry *entry; pgssSharedState *pgss = pgsm_get_ss(); - static char application_name[APPLICATIONNAME_LEN] = ""; - static int application_name_len = 0; - bool reset = false; - uint64 bucketid; - uint64 prev_bucket_id; - uint64 userid; - uint64 planid; - uint64 appid; - char comments[512] = ""; + char *app_name_ptr; + char app_name[APPLICATIONNAME_LEN] = ""; + int app_name_len = 0; + bool reset = false; + uint64 bucketid; + uint64 prev_bucket_id; + uint64 userid; + uint64 planid; + uint64 appid = 0; + char comments[512] = ""; char *norm_query = NULL; - static bool found_app_name = false; - static bool found_client_addr = false; - static uint client_addr = 0; + bool found_app_name = false; + bool found_client_addr = false; 
+ uint client_addr = 0; /* Monitoring is disabled */ if (!PGSM_ENABLED) @@ -1474,14 +1476,24 @@ pgss_store(uint64 queryid, else userid = GetUserId(); - if (!found_app_name) - application_name_len = pg_get_application_name(application_name, &found_app_name); + /* Try to read application name from GUC directly */ + if (application_name && *application_name) + { + app_name_ptr = application_name; + appid = djb2_hash_str((unsigned char *)application_name, &app_name_len); + } + else + { + app_name_len = pg_get_application_name(app_name, &found_app_name); + if (found_app_name) + appid = djb2_hash((unsigned char *)app_name, app_name_len); + app_name_ptr = app_name; + } if (!found_client_addr) client_addr = pg_get_client_addr(&found_client_addr); planid = plan_info ? plan_info->planid : 0; - appid = djb2_hash((unsigned char *)application_name, application_name_len); extract_query_comments(query, comments, sizeof(comments)); @@ -1610,8 +1622,9 @@ pgss_store(uint64 queryid, walusage, /* walusage */ reset, /* reset */ kind, /* kind */ - application_name, - application_name_len); + app_name_ptr, + app_name_len); + } LWLockRelease(pgss->lock); if (norm_query) @@ -3729,7 +3742,24 @@ static uint64 djb2_hash(unsigned char *str, size_t len) uint64 hash = 5381LLU; while (len--) - hash = ((hash << 5) + hash) ^ *str++; // hash(i - 1) * 33 ^ str[i] + hash = ((hash << 5) + hash) ^ *str++; // hash(i - 1) * 33 ^ str[i] + + return hash; +} + +static uint64 djb2_hash_str(unsigned char *str, int *out_len) +{ + uint64 hash = 5381LLU; + unsigned char *start = str; + unsigned char c; + + while ((c = *str) != '\0') + { + hash = ((hash << 5) + hash) ^ c; // hash(i - 1) * 33 ^ str[i] + ++str; + } + + *out_len = str - start; return hash; } From 9f8f94ed9c8fac929b33551e3d14c01b86984906 Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Wed, 12 Jan 2022 17:02:08 -0300 Subject: [PATCH 33/39] PG-325: Fix deadlock. 
If a query exceeds pg_stat_monitor.pgsm_query_max_len, then it's truncated before we save it into the query buffer (SaveQueryText). When reading the query back, on pg_stat_monitor_internal, we allocate a buffer for the query with length = pg_stat_monitor.pgsm_query_max_len, the problem is that the read_query function adds a '\0' to the end of the buffer when reading a query, thus if a query has been truncated, for example, to 1024, when reading it back read_query will store the '\0' at the position 1025, an out of array bounds position. Then, when we call pfree to release the buffer, PostgreSQL notices the buffer overrun and triggers an error assertion, the assertion calls our error hook which attempts to acquire the shared pgss->lock before pg_stat_monitor_internal has released it, leading to a deadlock. To avoid the problem we add 1 more byte to the extra '\0' during palloc call for query_text and parent_query_text. Also, we release the lock before calling pfree, just in case PostgreSQL finds a problem in pfree we won't deadlock again and get the error reported correctly. --- pg_stat_monitor.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 5938dbe..52780ab 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -1701,8 +1701,8 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, char parentid_txt[32]; pgssSharedState *pgss = pgsm_get_ss(); HTAB *pgss_hash = pgsm_get_hash(); - char *query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); - char *parent_query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); + char *query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN + 1); + char *parent_query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN + 1); /* Safety check... 
*/ if (!IsSystemInitialized()) @@ -2048,11 +2048,12 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = BoolGetDatum(toplevel); tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - pfree(query_txt); - pfree(parent_query_txt); /* clean up and return the tuplestore */ LWLockRelease(pgss->lock); + pfree(query_txt); + pfree(parent_query_txt); + tuplestore_donestoring(tupstore); } From 8d4cbefdf5e06a4315bdda7a6fbd7b68d9d67a9f Mon Sep 17 00:00:00 2001 From: Anastasia Alexadrova Date: Thu, 13 Jan 2022 10:53:49 +0200 Subject: [PATCH 34/39] PG-298 Add 'Report a bug' section to readme modified: README.md --- README.md | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 24d1ad5..9b0a19a 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ * [Setup](#setup) * [Building from source code](#building-from-source) * [How to contribute](#how-to-contribute) -* [Report a Bug](#report-a-bug) +* [Report a bug](#report-a-bug) * [Support, discussions and forums](#support-discussions-and-forums) * [License](#license) * [Copyright notice](#copyright-notice) @@ -85,7 +85,7 @@ The PostgreSQL YUM repository supports `pg_stat_monitor` for all [supported vers Find the list of supported platforms for `pg_stat_monitor` within [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) on the [Percona Release Lifecycle Overview](https://www.percona.com/services/policies/percona-software-support-lifecycle#pgsql) page. -### Installation Guidelines +### Installation guidelines You can install `pg_stat_monitor` from the following sources: @@ -118,7 +118,7 @@ Replace XX with the desired PostgreSQL version. 
For example, to install `pg_stat yum install percona-pg-stat-monitor13 ``` -#### Installing from PostgreSQL yum repositories +#### Installing from PostgreSQL `yum` repositories Install the PostgreSQL repositories following the instructions in the [Linux downloads (Red Hat family)](https://www.postgresql.org/download/linux/redhat/) chapter in PostgreSQL documentation. @@ -154,10 +154,8 @@ You can enable `pg_stat_monitor` when your `postgresql` instance is not running. Use the [ALTER SYSTEM](https://www.postgresql.org/docs/current/sql-altersystem.html)command from `psql` terminal to modify the `shared_preload_libraries` parameter. -``` +```sql ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_monitor'; - -ALTER SYSTEM ``` > **NOTE**: If you’ve added other modules to the `shared_preload_libraries` parameter (for example, `pg_stat_statements`), list all of them separated by commas for the `ALTER SYSTEM` command. @@ -170,23 +168,22 @@ Start or restart the `postgresql` instance to apply the changes. * On Debian and Ubuntu: -``` +```sh sudo systemctl restart postgresql.service ``` * On Red Hat Enterprise Linux and CentOS: -``` +```sh sudo systemctl restart postgresql-13 ``` Create the extension using the [CREATE EXTENSION](https://www.postgresql.org/docs/current/sql-createextension.html) command. Using this command requires the privileges of a superuser or a database owner. Connect to `psql` as a superuser for a database and run the following command: -``` +```sql CREATE EXTENSION pg_stat_monitor; -CREATE EXTENSION ``` @@ -242,9 +239,11 @@ We welcome and strongly encourage community participation and contributions, and The [Contributing Guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) contains the guidelines on how you can contribute. 
-### Report a Bug +### Report a bug -Please report all bugs to Percona's Jira: https://jira.percona.com/projects/PG/issues +If you would like to suggest a new feature / an improvement or you found a bug in `pg_stat_monitor`, please submit the report to the [Percona Jira issue tracker](https://jira.percona.com/projects/PG). + +Refer to the [Submit a bug report or a feature request](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md#submit-a-bug-report-or-a-feature-request) section for bug reporting guidelines. ### Support, discussions and forums From 11def58e7eb20af00bad7ef1946048d2a7d3df88 Mon Sep 17 00:00:00 2001 From: Anastasia Alexadrova Date: Thu, 13 Jan 2022 16:47:00 +0200 Subject: [PATCH 35/39] PG-308 README Updates Added a link to pg_stat_monitor view reference in the Overview Aded PG 14 to supported versions Updated links in the Documentation section modified: README.md --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 9b0a19a..9aefd37 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ **NOTE**: This is a beta release and is subject to further changes. We recommend using it in testing environments only. -The `pg_stat_monitor` is a **_Query Performance Monitoring_** tool for PostgreSQL. It attempts to provide a more holistic picture by providing much-needed query performance insights in a single view. +The `pg_stat_monitor` is a **_Query Performance Monitoring_** tool for PostgreSQL. It attempts to provide a more holistic picture by providing much-needed query performance insights in a [single view](https://github.com/percona/pg_stat_monitor/blob/master/docs/REFERENCE.md). `pg_stat_monitor` provides improved insights that allow database users to understand query origins, execution, planning statistics and details, query information, and metadata. This significantly improves observability, enabling users to debug and tune query performance. 
`pg_stat_monitor` is developed on the basis of `pg_stat_statements` as its more advanced replacement. @@ -39,7 +39,7 @@ To learn about other features, available in `pg_stat_monitor`, see the [Features `pg_stat_monitor` supports PostgreSQL versions 11 and above. It is compatible with both PostgreSQL provided by PostgreSQL Global Development Group (PGDG) and [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution). -The RPM (for RHEL and CentOS) and the DEB (for Debian and Ubuntu) packages are available from Percona repositories for PostgreSQL versions [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/). +The `RPM` (for RHEL and CentOS) and the `DEB` (for Debian and Ubuntu) packages are available from Percona repositories for PostgreSQL versions [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/). The RPM packages are also available in the official PostgreSQL (PGDG) yum repositories. 
@@ -49,8 +49,8 @@ The `pg_stat_monitor` should work on the latest version of both [Percona Distrib | **Distribution** | **Version** | **Provider** | | ---------------- | --------------- | ------------ | -|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/) and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/)| Percona| -| PostgreSQL | 11, 12, and 13 | PostgreSQL Global Development Group (PGDG) | +|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/) and [14](https://www.percona.com/downloads/postgresql-distribution-14/LATEST/)| Percona| +| PostgreSQL | 11, 12, 13 and 14 | PostgreSQL Global Development Group (PGDG) | ### Features @@ -69,10 +69,10 @@ The `pg_stat_monitor` should work on the latest version of both [Percona Distrib ### Documentation 1. [User guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md) -2. pg_stat_monitor vs pg_stat_statements -3. pg_stat_monitor view reference +2. [Comparing `pg_stat_monitor` and `pg_stat_statements`](https://github.com/percona/pg_stat_monitor/blob/master/docs/COMPARISON.md) +3. [pg_stat_monitor view reference](https://github.com/percona/pg_stat_monitor/blob/master/docs/REFERENCE.md) 4. [Release notes](https://github.com/percona/pg_stat_monitor/blob/master/docs/RELEASE_NOTES.md) -5. Contributing guide (https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) +5. 
[Contributing guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) ### Supported platforms From 0573be4090925b31c327584979cf1afdcb6a4c6d Mon Sep 17 00:00:00 2001 From: Diego Fronza Date: Wed, 12 Jan 2022 17:57:07 -0300 Subject: [PATCH 36/39] PG-329: Fix creation of pg_stat_monitor_errors view on SQL files. After the split into multiple pg_stat_monitor--1.0.XX.sql.in sql files, where XX is the PostgreSQL version, it was forgotten to add the errors view to the relevant files, this commit fixes that. --- pg_stat_monitor--1.0.13.sql.in | 35 +++++++++++++++++++++++++++++++++- pg_stat_monitor--1.0.14.sql.in | 35 +++++++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/pg_stat_monitor--1.0.13.sql.in b/pg_stat_monitor--1.0.13.sql.in index 662e9a6..864b617 100644 --- a/pg_stat_monitor--1.0.13.sql.in +++ b/pg_stat_monitor--1.0.13.sql.in @@ -257,9 +257,42 @@ $$ language plpgsql; -- total_time / greatest(ncalls, 1) as avg_time, -- ncalls, -- ROUND(CAST(total_time / greatest(sum(total_time) OVER(), 0.00000001) * 100 as numeric), 2)::text || '%' as load_comparison ---FROM pg_stat_monitor_hook_stats(); +-- FROM pg_stat_monitor_hook_stats(); + +CREATE FUNCTION pg_stat_monitor_errors( + OUT severity int, + OUT message text, + OUT msgtime text, + OUT calls int8 +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_monitor_errors' +LANGUAGE C STRICT VOLATILE PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION pgsm_log_severity_as_text(severity int) RETURNS TEXT AS +$$ +SELECT + CASE + WHEN severity = 0 THEN 'INFO' + WHEN severity = 1 THEN 'WARNING' + WHEN severity = 2 THEN 'ERROR' + END +$$ +LANGUAGE SQL PARALLEL SAFE; + +CREATE VIEW pg_stat_monitor_errors AS SELECT + pgsm_log_severity_as_text(severity) as severity, message, msgtime, calls +FROM pg_stat_monitor_errors(); + +CREATE FUNCTION pg_stat_monitor_reset_errors() +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C PARALLEL SAFE; GRANT SELECT ON pg_stat_monitor TO 
PUBLIC; GRANT SELECT ON pg_stat_monitor_settings TO PUBLIC; + +GRANT SELECT ON pg_stat_monitor_errors TO PUBLIC; -- Don't want this to be available to non-superusers. REVOKE ALL ON FUNCTION pg_stat_monitor_reset() FROM PUBLIC; +REVOKE ALL ON FUNCTION pg_stat_monitor_reset_errors() FROM PUBLIC; diff --git a/pg_stat_monitor--1.0.14.sql.in b/pg_stat_monitor--1.0.14.sql.in index f9cc22f..08f13b9 100644 --- a/pg_stat_monitor--1.0.14.sql.in +++ b/pg_stat_monitor--1.0.14.sql.in @@ -258,9 +258,42 @@ $$ language plpgsql; -- total_time / greatest(ncalls, 1) as avg_time, -- ncalls, -- ROUND(CAST(total_time / greatest(sum(total_time) OVER(), 0.00000001) * 100 as numeric), 2)::text || '%' as load_comparison ---FROM pg_stat_monitor_hook_stats(); +-- FROM pg_stat_monitor_hook_stats(); + +CREATE FUNCTION pg_stat_monitor_errors( + OUT severity int, + OUT message text, + OUT msgtime text, + OUT calls int8 +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_monitor_errors' +LANGUAGE C STRICT VOLATILE PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION pgsm_log_severity_as_text(severity int) RETURNS TEXT AS +$$ +SELECT + CASE + WHEN severity = 0 THEN 'INFO' + WHEN severity = 1 THEN 'WARNING' + WHEN severity = 2 THEN 'ERROR' + END +$$ +LANGUAGE SQL PARALLEL SAFE; + +CREATE VIEW pg_stat_monitor_errors AS SELECT + pgsm_log_severity_as_text(severity) as severity, message, msgtime, calls +FROM pg_stat_monitor_errors(); + +CREATE FUNCTION pg_stat_monitor_reset_errors() +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C PARALLEL SAFE; GRANT SELECT ON pg_stat_monitor TO PUBLIC; GRANT SELECT ON pg_stat_monitor_settings TO PUBLIC; + +GRANT SELECT ON pg_stat_monitor_errors TO PUBLIC; -- Don't want this to be available to non-superusers. 
REVOKE ALL ON FUNCTION pg_stat_monitor_reset() FROM PUBLIC; +REVOKE ALL ON FUNCTION pg_stat_monitor_reset_errors() FROM PUBLIC; From 5aa6764041cbf5cdf4536b861dd0aa8706827c8b Mon Sep 17 00:00:00 2001 From: Naeem Akhter Date: Wed, 19 Jan 2022 18:04:02 +0500 Subject: [PATCH 37/39] PG-267 Add testcase to test histogram. This commit adds following three sql based testcases: 1) Test unique application name set by user. 2) Histogram function is working properly as desired. 3) Error on insert is shown with proper message. --- Makefile | 2 +- .../expected/application_name_unique.out | 38 ++++++++++ regression/expected/error_insert.out | 34 +++++++++ regression/expected/error_insert_1.out | 34 +++++++++ regression/expected/histogram.out | 71 +++++++++++++++++++ regression/expected/histogram_1.out | 71 +++++++++++++++++++ regression/sql/application_name_unique.sql | 9 +++ regression/sql/error_insert.sql | 17 +++++ regression/sql/histogram.sql | 55 ++++++++------ 9 files changed, 308 insertions(+), 23 deletions(-) create mode 100644 regression/expected/application_name_unique.out create mode 100644 regression/expected/error_insert.out create mode 100644 regression/expected/error_insert_1.out create mode 100644 regression/expected/histogram_1.out create mode 100644 regression/sql/application_name_unique.sql create mode 100644 regression/sql/error_insert.sql diff --git a/Makefile b/Makefile index 02cdb99..1b2a38d 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ PGFILEDESC = "pg_stat_monitor - execution statistics of SQL statements" LDFLAGS_SL += $(filter -lm, $(LIBS)) REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/pg_stat_monitor/pg_stat_monitor.conf --inputdir=regression -REGRESS = basic version guc counters relations database top_query application_name cmd_type error state rows tags +REGRESS = basic version guc counters relations database error_insert application_name application_name_unique top_query cmd_type error rows tags histogram # Disabled because these tests 
require "shared_preload_libraries=pg_stat_statements", # which typical installcheck users do not have (e.g. buildfarm clients). diff --git a/regression/expected/application_name_unique.out b/regression/expected/application_name_unique.out new file mode 100644 index 0000000..15ba62f --- /dev/null +++ b/regression/expected/application_name_unique.out @@ -0,0 +1,38 @@ +Create EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +Set application_name = 'naeem' ; +SELECT 1 AS num; + num +----- + 1 +(1 row) + +Set application_name = 'psql' ; +SELECT 1 AS num; + num +----- + 1 +(1 row) + +SELECT query,application_name FROM pg_stat_monitor ORDER BY query, application_name COLLATE "C"; + query | application_name +--------------------------------+------------------------------------ + SELECT $1 AS num | naeem + SELECT $1 AS num | psql + SELECT pg_stat_monitor_reset() | pg_regress/application_name_unique + Set application_name = 'naeem' | naeem + Set application_name = 'psql' | psql +(5 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/error_insert.out b/regression/expected/error_insert.out new file mode 100644 index 0000000..19a1d89 --- /dev/null +++ b/regression/expected/error_insert.out @@ -0,0 +1,34 @@ +Drop Table if exists Company; +NOTICE: table "company" does not exist, skipping +CREATE TABLE Company( + ID INT PRIMARY KEY NOT NULL, + NAME TEXT NOT NULL +); +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +ERROR: duplicate key value violates unique constraint "company_pkey" +DETAIL: Key (id)=(1) already exists. 
+Drop Table if exists Company; +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +-------------------------------------------------------+--------+---------+--------------------------------------------------------------- + Drop Table if exists Company | 0 | | + INSERT INTO Company(ID, Name) VALUES ($1, $2) | 0 | | + INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); | 21 | 23505 | duplicate key value violates unique constraint "company_pkey" + SELECT pg_stat_monitor_reset() | 0 | | +(4 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/error_insert_1.out b/regression/expected/error_insert_1.out new file mode 100644 index 0000000..c706613 --- /dev/null +++ b/regression/expected/error_insert_1.out @@ -0,0 +1,34 @@ +Drop Table if exists Company; +NOTICE: table "company" does not exist, skipping +CREATE TABLE Company( + ID INT PRIMARY KEY NOT NULL, + NAME TEXT NOT NULL +); +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +ERROR: duplicate key value violates unique constraint "company_pkey" +DETAIL: Key (id)=(1) already exists. 
+Drop Table if exists Company; +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +-------------------------------------------------------+--------+---------+--------------------------------------------------------------- + Drop Table if exists Company | 0 | | + INSERT INTO Company(ID, Name) VALUES ($1, $2) | 0 | | + INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); | 20 | 23505 | duplicate key value violates unique constraint "company_pkey" + SELECT pg_stat_monitor_reset() | 0 | | +(4 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/histogram.out b/regression/expected/histogram.out index e69de29..40e7fe5 100644 --- a/regression/expected/histogram.out +++ b/regression/expected/histogram.out @@ -0,0 +1,71 @@ +CREATE OR REPLACE FUNCTION generate_histogram() + RETURNS TABLE ( + range TEXT, freq INT, bar TEXT + ) AS $$ +Declare + bucket_id integer; + query_id text; +BEGIN + select bucket into bucket_id from pg_stat_monitor order by calls desc limit 1; + select queryid into query_id from pg_stat_monitor order by calls desc limit 1; + --RAISE INFO 'bucket_id %', bucket_id; + --RAISE INFO 'query_id %', query_id; + return query + SELECT * FROM histogram(bucket_id, query_id) AS a(range TEXT, freq INT, bar TEXT); +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION run_pg_sleep(INTEGER) RETURNS VOID AS $$ +DECLARE + loops ALIAS FOR $1; +BEGIN + FOR i IN 1..loops LOOP + --RAISE INFO 'Current timestamp: %', timeofday()::TIMESTAMP; + RAISE INFO 'Sleep % seconds', i; + PERFORM pg_sleep(i); + END LOOP; +END; +$$ LANGUAGE 'plpgsql' STRICT; +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +Set pg_stat_monitor.track='all'; +select run_pg_sleep(5); +INFO: Sleep 1 seconds +INFO: Sleep 2 seconds +INFO: 
Sleep 3 seconds +INFO: Sleep 4 seconds +INFO: Sleep 5 seconds + run_pg_sleep +-------------- + +(1 row) + +SELECT substr(query, 0,50) as query, calls, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; + query | calls | resp_calls +---------------------------------+-------+----------------------- + SELECT pg_sleep(i) | 5 | {0,0,0,0,0,0,3,2,0,0} + SELECT pg_stat_monitor_reset() | 1 | {1,0,0,0,0,0,0,0,0,0} + Set pg_stat_monitor.track='all' | 1 | {1,0,0,0,0,0,0,0,0,0} + select run_pg_sleep($1) | 1 | {0,0,0,0,0,0,0,0,1,0} +(4 rows) + +select * from generate_histogram(); + range | freq | bar +--------------------+------+-------------------------------------------------------------------------------------------- + (0 - 3)} | 0 | + (3 - 10)} | 0 | + (10 - 31)} | 0 | + (31 - 100)} | 0 | + (100 - 316)} | 0 | + (316 - 1000)} | 0 | + (1000 - 3162)} | 3 | ■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ + (3162 - 10000)} | 2 | ■■■■■■■■■■■■■■■■■■■■ + (10000 - 31622)} | 0 | + (31622 - 100000)} | 0 | +(10 rows) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/expected/histogram_1.out b/regression/expected/histogram_1.out new file mode 100644 index 0000000..f926990 --- /dev/null +++ b/regression/expected/histogram_1.out @@ -0,0 +1,71 @@ +CREATE OR REPLACE FUNCTION generate_histogram() + RETURNS TABLE ( + range TEXT, freq INT, bar TEXT + ) AS $$ +Declare + bucket_id integer; + query_id text; +BEGIN + select bucket into bucket_id from pg_stat_monitor order by calls desc limit 1; + select queryid into query_id from pg_stat_monitor order by calls desc limit 1; + --RAISE INFO 'bucket_id %', bucket_id; + --RAISE INFO 'query_id %', query_id; + return query + SELECT * FROM histogram(bucket_id, query_id) AS a(range TEXT, freq INT, bar TEXT); +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION run_pg_sleep(INTEGER) RETURNS VOID AS $$ +DECLARE + loops ALIAS FOR $1; +BEGIN + FOR i IN 1..loops LOOP + --RAISE INFO 'Current timestamp: %', timeofday()::TIMESTAMP; + RAISE INFO 'Sleep % 
seconds', i; + PERFORM pg_sleep(i); + END LOOP; +END; +$$ LANGUAGE 'plpgsql' STRICT; +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +Set pg_stat_monitor.track='all'; +select run_pg_sleep(5); +INFO: Sleep 1 seconds +INFO: Sleep 2 seconds +INFO: Sleep 3 seconds +INFO: Sleep 4 seconds +INFO: Sleep 5 seconds + run_pg_sleep +-------------- + +(1 row) + +SELECT substr(query, 0,50) as query, calls, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; + query | calls | resp_calls +---------------------------------+-------+----------------------- + SELECT pg_sleep(i) | 5 | {0,0,0,0,0,0,3,2,0,0} + SELECT pg_stat_monitor_reset() | 1 | {1,0,0,0,0,0,0,0,0,0} + Set pg_stat_monitor.track='all' | 1 | {1,0,0,0,0,0,0,0,0,0} + select run_pg_sleep($1) | 1 | {0,0,0,0,0,0,0,0,1,0} +(4 rows) + +select * from generate_histogram(); + range | freq | bar +--------------------+------+-------------------------------- + (0 - 3)} | 0 | + (3 - 10)} | 0 | + (10 - 31)} | 0 | + (31 - 100)} | 0 | + (100 - 316)} | 0 | + (316 - 1000)} | 0 | + (1000 - 3162)} | 3 | ■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ + (3162 - 10000)} | 2 | ■■■■■■■■■■■■■■■■■■■■ + (10000 - 31622)} | 0 | + (31622 - 100000)} | 0 | +(10 rows) + +DROP EXTENSION pg_stat_monitor; diff --git a/regression/sql/application_name_unique.sql b/regression/sql/application_name_unique.sql new file mode 100644 index 0000000..49a1d55 --- /dev/null +++ b/regression/sql/application_name_unique.sql @@ -0,0 +1,9 @@ +Create EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); +Set application_name = 'naeem' ; +SELECT 1 AS num; +Set application_name = 'psql' ; +SELECT 1 AS num; +SELECT query,application_name FROM pg_stat_monitor ORDER BY query, application_name COLLATE "C"; +SELECT pg_stat_monitor_reset(); +DROP EXTENSION pg_stat_monitor; diff --git a/regression/sql/error_insert.sql b/regression/sql/error_insert.sql new file mode 100644 index 0000000..14f21f1 --- 
/dev/null +++ b/regression/sql/error_insert.sql @@ -0,0 +1,17 @@ +Drop Table if exists Company; + +CREATE TABLE Company( + ID INT PRIMARY KEY NOT NULL, + NAME TEXT NOT NULL +); + + +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); +INSERT INTO Company(ID, Name) VALUES (1, 'Percona'); + +Drop Table if exists Company; +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; +SELECT pg_stat_monitor_reset(); +DROP EXTENSION pg_stat_monitor; diff --git a/regression/sql/histogram.sql b/regression/sql/histogram.sql index 50aa897..92960df 100644 --- a/regression/sql/histogram.sql +++ b/regression/sql/histogram.sql @@ -1,28 +1,39 @@ +CREATE OR REPLACE FUNCTION generate_histogram() + RETURNS TABLE ( + range TEXT, freq INT, bar TEXT + ) AS $$ +Declare + bucket_id integer; + query_id text; +BEGIN + select bucket into bucket_id from pg_stat_monitor order by calls desc limit 1; + select queryid into query_id from pg_stat_monitor order by calls desc limit 1; + --RAISE INFO 'bucket_id %', bucket_id; + --RAISE INFO 'query_id %', query_id; + return query + SELECT * FROM histogram(bucket_id, query_id) AS a(range TEXT, freq INT, bar TEXT); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION run_pg_sleep(INTEGER) RETURNS VOID AS $$ +DECLARE + loops ALIAS FOR $1; +BEGIN + FOR i IN 1..loops LOOP + --RAISE INFO 'Current timestamp: %', timeofday()::TIMESTAMP; + RAISE INFO 'Sleep % seconds', i; + PERFORM pg_sleep(i); + END LOOP; +END; +$$ LANGUAGE 'plpgsql' STRICT; + CREATE EXTENSION pg_stat_monitor; - -CREATE TABLE t1(a int); - SELECT pg_stat_monitor_reset(); +Set pg_stat_monitor.track='all'; +select run_pg_sleep(5); -INSERT INTO t1 VALUES(generate_series(1,10)); -ANALYZE t1; -SELECT count(*) FROM t1; +SELECT substr(query, 0,50) as query, calls, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; -INSERT INTO t1 VALUES(generate_series(1,10000)); -ANALYZE t1; -SELECT 
count(*) FROM t1;; +select * from generate_histogram(); -INSERT INTO t1 VALUES(generate_series(1,1000000)); -ANALYZE t1; -SELECT count(*) FROM t1; - -INSERT INTO t1 VALUES(generate_series(1,10000000)); -ANALYZE t1; -SELECT count(*) FROM t1; - -SELECT query, calls, min_time, max_time, resp_calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; -SELECT * FROM histogram(0, 'F44CD1B4B33A47AF') AS a(range TEXT, freq INT, bar TEXT); - -DROP TABLE t1; -SELECT pg_stat_monitor_reset(); DROP EXTENSION pg_stat_monitor; From 482744e77bf193effcc14ef365b47d13980bc726 Mon Sep 17 00:00:00 2001 From: Lenz Grimmer Date: Wed, 19 Jan 2022 15:35:35 +0100 Subject: [PATCH 38/39] docs: Updated Forum URL, removed Discord link Updated the Forum URL to point to the dedicated pg_stat_monitor forum in `README.md` and `CONTRIBUTING.md`, removed link to the Discord channel to ensure that conversations are focused to one location. Updated the table of contents in the README. Signed-off-by: Lenz Grimmer --- CONTRIBUTING.md | 2 +- README.md | 35 ++++++++++++++++++++--------------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f8ee2e8..15a347a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,7 +6,7 @@ We're glad that you would like to become a Percona community member and particip You can contribute in one of the following ways: -1. Reach us on our [Forums](https://forums.percona.com/) and Discord. +1. Reach us on our [Forums](https://forums.percona.com/c/postgresql/pg-stat-monitor/69). 2. [Submit a bug report or a feature request](#submit-a-bug-report-or-a-feature-request) 3. [Submit a pull request (PR) with the code patch](#submit-a-pull-request) 4. 
[Contribute to documentation](#contributing-to-documentation) diff --git a/README.md b/README.md index 9aefd37..eb6b689 100644 --- a/README.md +++ b/README.md @@ -10,20 +10,25 @@ ## Table of Contents -* [Overview](#overview) -* [Supported versions](#supported-versions) -* [Features](#features) -* [Documentation](#documentation) -* [Supported platforms](#supported-platforms) -* [Installation guidelines](#installation-guidelines) -* [Configuration](#configuration) -* [Setup](#setup) -* [Building from source code](#building-from-source) -* [How to contribute](#how-to-contribute) -* [Report a bug](#report-a-bug) -* [Support, discussions and forums](#support-discussions-and-forums) -* [License](#license) -* [Copyright notice](#copyright-notice) +- [pg_stat_monitor: Query Performance Monitoring Tool for PostgreSQL](#pg_stat_monitor-query-performance-monitoring-tool-for-postgresql) + - [Table of Contents](#table-of-contents) + - [Overview](#overview) + - [Supported versions](#supported-versions) + - [Features](#features) + - [Documentation](#documentation) + - [Supported platforms](#supported-platforms) + - [Installation guidelines](#installation-guidelines) + - [Installing from Percona repositories](#installing-from-percona-repositories) + - [Installing from PostgreSQL `yum` repositories](#installing-from-postgresql-yum-repositories) + - [Installing from PGXN](#installing-from-pgxn) + - [Configuration](#configuration) + - [Setup](#setup) + - [Building from source](#building-from-source) + - [How to contribute](#how-to-contribute) + - [Report a bug](#report-a-bug) + - [Support, discussions and forums](#support-discussions-and-forums) + - [License](#license) + - [Copyright notice](#copyright-notice) ## Overview @@ -248,7 +253,7 @@ Refer to the [Submit a bug report or a feature request](https://github.com/perco ### Support, discussions and forums -We welcome your feedback on your experience with `pg_stat_monitor`. 
Join our [technical forum](https://forums.percona.com/) or [Discord](https://discord.gg/mQEyGPkNbR) channel for help with `pg_stat_monitor` and Percona's open source software for MySQL®, [PostgreSQL](https://www.percona.com/software/postgresql-distribution), and MongoDB® databases. +We welcome your feedback on your experience with `pg_stat_monitor`. Join our [technical forum](https://forums.percona.com/c/postgresql/pg-stat-monitor/69) for help with `pg_stat_monitor`. ### License From 781282364c6f13ad9015cf9d7065dce9209c1eb4 Mon Sep 17 00:00:00 2001 From: Anastasia Alexadrova Date: Thu, 20 Jan 2022 11:12:46 +0200 Subject: [PATCH 39/39] PG-287 Added uninstall steps to README modified: README.md --- README.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/README.md b/README.md index eb6b689..9c93e8f 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ - [Configuration](#configuration) - [Setup](#setup) - [Building from source](#building-from-source) + - [Uninstall `pg_stat_monitor`](#uninstall-pg_stat_monitor) - [How to contribute](#how-to-contribute) - [Report a bug](#report-a-bug) - [Support, discussions and forums](#support-discussions-and-forums) @@ -238,6 +239,45 @@ make USE_PGXS=1 make USE_PGXS=1 install ``` +### Uninstall `pg_stat_monitor` + +To uninstall `pg_stat_monitor`, do the following: + +1. Disable statistics collection. From the `psql` terminal, run the following command: + + ```sql + ALTER SYSTEM SET pg_stat_monitor.pgsm_enable = 0; + ``` + +2. Drop `pg_stat_monitor` extension: + + ```sql + DROP EXTENSION pg_stat_monitor; + ``` + +3. Remove `pg_stat_monitor` from the `shared_preload_libraries` configuration parameter: + + ```sql + ALTER SYSTEM SET shared_preload_libraries = ''; + ``` + + **Important**: If the `shared_preload_libraries` parameter includes other modules, specify them all for the `ALTER SYSTEM SET` command to keep using them. + +4. Restart the `postgresql` instance to apply the changes. 
The following command restarts PostgreSQL 13. Replace the version value with the one you are using. + + * On Debian and Ubuntu: + + ```sh + sudo systemctl restart postgresql.service + ``` + + * On Red Hat Enterprise Linux and CentOS: + + + ```sh + sudo systemctl restart postgresql-13 + ``` + ### How to contribute We welcome and strongly encourage community participation and contributions, and are always looking for new members that are as dedicated to serving the community as we are.