diff --git a/.github/workflows/coverage_test.yml b/.github/workflows/coverage_test.yml index 55304c4..41429d9 100644 --- a/.github/workflows/coverage_test.yml +++ b/.github/workflows/coverage_test.yml @@ -22,7 +22,9 @@ jobs: run: | sudo apt-get update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl git lcov -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev lcov sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -37,27 +39,54 @@ jobs: - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-coverage --enable-tap-tests --prefix=/opt/pgsql + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' \ + '--enable-coverage' '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' \ + '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + '--libdir=${prefix}/lib/x86_64-linux-gnu' \ + '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' \ + '--with-python' '--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' \ + 'PYTHON=/usr/bin/python3' '--mandir=/usr/share/postgresql/13/man' \ + '--docdir=/usr/share/doc/postgresql-doc-13' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/13' '--bindir=/usr/lib/postgresql/13/bin' \ + 
'--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 2:13-x.focal)' \ + '--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-9' 'CLANG=/usr/bin/clang-9' \ + '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' \ + 'TAR=/bin/tar' 'XSLTPROC=xsltproc --nonet' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + 'build_alias=x86_64-linux-gnu' 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' \ + 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' make - make install + sudo make install + - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/13/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Build pg_stat_monitor + run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/13/bin:$PATH" + sudo cp /usr/lib/postgresql/13/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ - name: Start pg_stat_monitor_tests & Run code coverage run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile stop echo "shared_preload_libraries = 'pg_stat_monitor'" >>
/opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck make coverage-html lcov --capture --directory . --output-file coverage/lcov.info diff --git a/.github/workflows/pg11packagetest.yml b/.github/workflows/pg11packagetest.yml new file mode 100644 index 0000000..71913bc --- /dev/null +++ b/.github/workflows/pg11packagetest.yml @@ -0,0 +1,60 @@ +name: pg11package-test +on: [push] + +jobs: + build: + name: pg11package-test + runs-on: ubuntu-20.04 + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl wget -y + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install postgresql 11 + run: | + sudo wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" >> /etc/apt/sources.list.d/pgdg.list' + sudo apt update + sudo apt -y install postgresql-11 postgresql-server-dev-11 + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Change sources owner to postgres + run: sudo chown -R postgres:postgres src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a 
/etc/postgresql/11/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg11test-pgdg-packages.yml b/.github/workflows/pg11test-pgdg-packages.yml new file mode 100644 index 0000000..ad8ef4d --- /dev/null +++ b/.github/workflows/pg11test-pgdg-packages.yml @@ -0,0 +1,63 @@ +name: Test-with-pg11-pgdg-packages +on: [push] + +jobs: + build: + name: pg11-test-with-pgdg-packages + runs-on: ubuntu-18.04 + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 11 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-11 + sudo apt-get update + sudo apt-get -y install postgresql-client-11 + sudo apt-get update + sudo apt install postgresql-server-dev-11 + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + export PATH="/usr/lib/postgresql/11/bin:$PATH" + sudo cp /usr/lib/postgresql/11/bin/pg_config /usr/bin + sudo make 
USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/11/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg11test.yml b/.github/workflows/pg11test.yml index 57ea811..a1f7e53 100644 --- a/.github/workflows/pg11test.yml +++ b/.github/workflows/pg11test.yml @@ -1,10 +1,10 @@ -name: pg11-test +name: Test-with-pg11-build on: [push] jobs: build: name: pg11-test - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 steps: - name: Clone postgres repository uses: actions/checkout@v2 @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_11_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf 
/var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -37,34 +34,76 @@ jobs: - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=/usr/include' \ + '--mandir=/usr/share/man' '--infodir=/usr/share/info' '--sysconfdir=/etc' \ + '--localstatedir=/var' '--disable-silent-rules' '--libdir=/usr/lib/x86_64-linux-gnu' \ + 'runstatedir=/run' '--disable-maintainer-mode' '--disable-dependency-tracking' \ + '--with-icu' '--with-tcl' '--with-perl' '--with-python' '--with-pam' '--with-openssl' \ + '--with-libxml' '--with-libxslt' 'PYTHON=/usr/bin/python3' \ + '--mandir=/usr/share/postgresql/11/man' '--docdir=/usr/share/doc/postgresql-doc-11' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/11' '--bindir=/usr/lib/postgresql/11/bin' \ + '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 11.x.pgdg20.04+1)' \ + '--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-9' 'CLANG=/usr/bin/clang-9' '--with-systemd' \ + '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' 'TAR=/bin/tar' \ + 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + '--with-includes=/usr/include/mit-krb5' '--with-libs=/usr/lib/mit-krb5' \ + '--with-libs=/usr/lib/x86_64-linux-gnu/mit-krb5' 'build_alias=x86_64-linux-gnu' \ + 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + 
make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/11/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/11/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/11/bin:$PATH" + sudo cp /usr/lib/postgresql/11/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/11/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/11/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ + + - name: Start Server installcheck-world tests (without TAP) + run: | + make installcheck-world + + - name: Report on installcheck-world test suites fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions output files of failed testsuite, and postgresql log + path: | + **/regression.diffs + **/regression.out + src/pg_stat_monitor/logfile + retention-days: 1 - name: Start pg_stat_monitor_tests run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git 
a/.github/workflows/pg12packagetest.yml b/.github/workflows/pg12packagetest.yml new file mode 100644 index 0000000..d8207a3 --- /dev/null +++ b/.github/workflows/pg12packagetest.yml @@ -0,0 +1,60 @@ +name: pg12package-test +on: [push] + +jobs: + build: + name: pg12package-test + runs-on: ubuntu-20.04 + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl wget -y + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install postgresql 12 + run: | + sudo wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" >> /etc/apt/sources.list.d/pgdg.list' + sudo apt update + sudo apt -y install postgresql-12 postgresql-server-dev-12 + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Change sources owner to postgres + run: sudo chown -R postgres:postgres src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/12/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions 
diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg12test-pgdg-packages.yml b/.github/workflows/pg12test-pgdg-packages.yml new file mode 100644 index 0000000..274e082 --- /dev/null +++ b/.github/workflows/pg12test-pgdg-packages.yml @@ -0,0 +1,63 @@ +name: Test-with-pg12-pgdg-packages +on: [push] + +jobs: + build: + name: pg12-test-with-pgdg-packages + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 12 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-12 + sudo apt-get update + sudo apt-get -y install postgresql-client-12 + sudo apt-get update + sudo apt install postgresql-server-dev-12 + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + export PATH="/usr/lib/postgresql/12/bin:$PATH" + sudo cp /usr/lib/postgresql/12/bin/pg_config /usr/bin + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/12/main/postgresql.conf + sudo service 
postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg12test.yml b/.github/workflows/pg12test.yml index 1eee4c2..919614b 100644 --- a/.github/workflows/pg12test.yml +++ b/.github/workflows/pg12test.yml @@ -1,4 +1,4 @@ -name: pg12-test +name: Test-with-pg12-build on: [push] jobs: @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_12_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -30,41 +27,77 @@ jobs: sudo rm -rf /usr/share/postgresql sudo rm -rf /etc/postgresql sudo rm -f /usr/bin/pg_config - - name: Create pgsql dir run: mkdir -p /opt/pgsql - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=/usr/include' 
'--mandir=/usr/share/man' \ + '--infodir=/usr/share/info' '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + '--libdir=/usr/lib/x86_64-linux-gnu' 'runstatedir=/run' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' '--with-python' \ + '--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' 'PYTHON=/usr/bin/python3' \ + '--mandir=/usr/share/postgresql/12/man' '--docdir=/usr/share/doc/postgresql-doc-12' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' '--datadir=/usr/share/postgresql/12' \ + '--bindir=/usr/lib/postgresql/12/bin' '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 12.x.pgdg20.04+1)' '--enable-nls' \ + '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' '--enable-dtrace' '--disable-rpath' \ + '--with-uuid=e2fs' '--with-gnu-ld' '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-9' 'CLANG=/usr/bin/clang-9' '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' \ + 'PROVE=/usr/bin/prove' 'TAR=/bin/tar' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + '--with-includes=/usr/include/mit-krb5' '--with-libs=/usr/lib/mit-krb5' \ + '--with-libs=/usr/lib/x86_64-linux-gnu/mit-krb5' 'build_alias=x86_64-linux-gnu' \ + 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/12/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/12/bin/pg_ctl -D 
/opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/12/bin:$PATH" + sudo cp /usr/lib/postgresql/12/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/12/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/12/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ + + - name: Start Server installcheck-world tests (without TAP) + run: | + make installcheck-world + - name: Report on installcheck-world test suites fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions output files of failed testsuite, and postgresql log + path: | + **/regression.diffs + **/regression.out + src/pg_stat_monitor/logfile + retention-days: 1 - name: Start pg_stat_monitor_tests run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git a/.github/workflows/pg13packagetest.yml b/.github/workflows/pg13packagetest.yml new file mode 100644 index 0000000..7bc1176 --- /dev/null +++ b/.github/workflows/pg13packagetest.yml @@ -0,0 +1,60 @@ +name: pg13package-test +on: [push] + +jobs: + build: + name: pg13package-test + runs-on: ubuntu-20.04 + steps: + - name: Clone 
pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl wget -y + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install postgresql 13 + run: | + sudo wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main" >> /etc/apt/sources.list.d/pgdg.list' + sudo apt update + sudo apt -y install postgresql-13 postgresql-server-dev-13 + + - name: Build pg_stat_monitor + run: | + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Change sources owner to postgres + run: sudo chown -R postgres:postgres src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/13/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg13test-pgdg-packages.yml b/.github/workflows/pg13test-pgdg-packages.yml new file mode 100644 index 0000000..1a718d2 --- /dev/null +++ 
b/.github/workflows/pg13test-pgdg-packages.yml @@ -0,0 +1,59 @@ +name: Test-with-pg13-pgdg-packages +on: [push] + +jobs: + build: + name: pg13-test-with-pgdg-packages + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 13 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-13 postgresql-client-13 postgresql-contrib postgresql-server-dev-13 + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + export PATH="/usr/lib/postgresql/13/bin:$PATH" + sudo cp /usr/lib/postgresql/13/bin/pg_config /usr/bin + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/13/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git 
a/.github/workflows/pg13test.yml b/.github/workflows/pg13test.yml index dd746fa..6a85cd5 100644 --- a/.github/workflows/pg13test.yml +++ b/.github/workflows/pg13test.yml @@ -1,4 +1,4 @@ -name: pg13-test +name: Test-with-pg13-build on: [push] jobs: @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_13_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-9 llvm-9 llvm-9-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -30,41 +27,82 @@ jobs: sudo rm -rf /usr/share/postgresql sudo rm -rf /etc/postgresql sudo rm -f /usr/bin/pg_config - - name: Create pgsql dir run: mkdir -p /opt/pgsql - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' \ + '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' \ + '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + '--libdir=${prefix}/lib/x86_64-linux-gnu' \ + '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' \ + '--with-python' 
'--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' \ + 'PYTHON=/usr/bin/python3' '--mandir=/usr/share/postgresql/13/man' \ + '--docdir=/usr/share/doc/postgresql-doc-13' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/13' '--bindir=/usr/lib/postgresql/13/bin' \ + '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 2:13-x.focal)' \ + '--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-9' 'CLANG=/usr/bin/clang-9' \ + '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' \ + 'TAR=/bin/tar' 'XSLTPROC=xsltproc --nonet' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + 'build_alias=x86_64-linux-gnu' 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' \ + 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/13/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/13/bin:$PATH" + sudo cp /usr/lib/postgresql/13/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1
install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/13/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ + + - name: Start Server installcheck-world tests (without TAP) + run: | + make installcheck-world + - name: Report on installcheck-world test suites fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions output files of failed testsuite, and postgresql log + path: | + **/regression.diffs + **/regression.out + src/pg_stat_monitor/logfile + retention-days: 1 - name: Start pg_stat_monitor_tests run: | - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git a/.github/workflows/pg14test-pgdg-packages.yml b/.github/workflows/pg14test-pgdg-packages.yml new file mode 100644 index 0000000..ad6bc91 --- /dev/null +++ b/.github/workflows/pg14test-pgdg-packages.yml @@ -0,0 +1,63 @@ +name: Test-with-pg14-pgdg-packages +on: [push] + +jobs: + build: + name: pg14-test-with-pgdg-packages + runs-on: ubuntu-latest + steps: + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' + + - name: Delete old postgresql files + run: | + sudo apt-get update + sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* + sudo rm -rf /var/lib/postgresql/ + sudo rm -rf /var/log/postgresql/ + sudo rm -rf /etc/postgresql/ + sudo 
rm -rf /usr/lib/postgresql + sudo rm -rf /usr/include/postgresql + sudo rm -rf /usr/share/postgresql + sudo rm -rf /etc/postgresql + sudo rm -f /usr/bin/pg_config + + - name: Install PG Distribution Postgresql 14 + run: | + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main 14" > /etc/apt/sources.list.d/pgdg.list' + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get update + sudo apt-get -y install postgresql-14 + sudo apt-get update + sudo apt-get -y install postgresql-client-14 + sudo apt-get update + sudo apt-get -y install postgresql-server-dev-14 + sudo chown -R postgres:postgres src/ + + - name: Build pg_stat_monitor + run: | + export PATH="/usr/lib/postgresql/14/bin:$PATH" + sudo cp /usr/lib/postgresql/14/bin/pg_config /usr/bin + sudo make USE_PGXS=1 + sudo make USE_PGXS=1 install + working-directory: src/pg_stat_monitor/ + + - name: Start pg_stat_monitor_tests + run: | + sudo service postgresql stop + echo "shared_preload_libraries = 'pg_stat_monitor'" | sudo tee -a /etc/postgresql/14/main/postgresql.conf + sudo service postgresql start + sudo -u postgres bash -c 'make installcheck USE_PGXS=1' + working-directory: src/pg_stat_monitor/ + + - name: Report on test fail + uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: Regressions diff and postgresql log + path: | + src/pg_stat_monitor/regression.diffs + src/pg_stat_monitor/logfile + retention-days: 1 diff --git a/.github/workflows/pg14test.yml b/.github/workflows/pg14test.yml index acfcdbf..0824bd4 100644 --- a/.github/workflows/pg14test.yml +++ b/.github/workflows/pg14test.yml @@ -1,4 +1,4 @@ -name: pg14-test +name: Test-with-pg14-build on: [push] jobs: @@ -12,16 +12,13 @@ jobs: repository: 'postgres/postgres' ref: 'REL_14_STABLE' - - name: Clone pg_stat_monitor repository - uses: actions/checkout@v2 - with: - path: 'src/pg_stat_monitor' - - name: Install dependencies run: | sudo apt-get 
update sudo apt purge postgresql-client-common postgresql-common postgresql postgresql* - sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y + sudo apt-get install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison flex libipc-run-perl -y docbook-xsl + sudo apt-get install -y libxml2 libxml2-utils libxml2-dev libxslt-dev xsltproc libkrb5-dev libldap2-dev libsystemd-dev gettext tcl-dev libperl-dev + sudo apt-get install -y pkg-config clang-11 llvm-11 llvm-11-dev libselinux1-dev python-dev python3-dev uuid-dev liblz4-dev sudo rm -rf /var/lib/postgresql/ sudo rm -rf /var/log/postgresql/ sudo rm -rf /etc/postgresql/ @@ -30,41 +27,68 @@ jobs: sudo rm -rf /usr/share/postgresql sudo rm -rf /etc/postgresql sudo rm -f /usr/bin/pg_config - - name: Create pgsql dir run: mkdir -p /opt/pgsql - name: Build postgres run: | export PATH="/opt/pgsql/bin:$PATH" - ./configure --enable-tap-tests --prefix=/opt/pgsql - make - make install + ./configure '--build=x86_64-linux-gnu' '--prefix=/usr' '--includedir=${prefix}/include' \ + '--mandir=${prefix}/share/man' '--infodir=${prefix}/share/info' \ + '--sysconfdir=/etc' '--localstatedir=/var' '--disable-silent-rules' \ + '--libdir=${prefix}/lib/x86_64-linux-gnu' \ + '--libexecdir=${prefix}/lib/x86_64-linux-gnu' '--disable-maintainer-mode' \ + '--disable-dependency-tracking' '--with-icu' '--with-tcl' '--with-perl' \ + '--with-python' '--with-pam' '--with-openssl' '--with-libxml' '--with-libxslt' \ + 'PYTHON=/usr/bin/python3' '--mandir=/usr/share/postgresql/14/man' \ + '--docdir=/usr/share/doc/postgresql-doc-14' \ + '--sysconfdir=/etc/postgresql-common' '--datarootdir=/usr/share/' \ + '--datadir=/usr/share/postgresql/14' '--bindir=/usr/lib/postgresql/14/bin' \ + '--libdir=/usr/lib/x86_64-linux-gnu/' '--libexecdir=/usr/lib/postgresql/' \ + '--includedir=/usr/include/postgresql/' '--with-extra-version= (Ubuntu 
2:14-x.focal)' \ + '--enable-nls' '--enable-thread-safety' '--enable-tap-tests' '--enable-debug' \ + '--enable-dtrace' '--disable-rpath' '--with-uuid=e2fs' '--with-gnu-ld' \ + '--with-pgport=5432' '--with-system-tzdata=/usr/share/zoneinfo' '--with-llvm' \ + 'LLVM_CONFIG=/usr/bin/llvm-config-11' 'CLANG=/usr/bin/clang-11' \ + '--with-systemd' '--with-selinux' 'MKDIR_P=/bin/mkdir -p' 'PROVE=/usr/bin/prove' \ + 'TAR=/bin/tar' 'XSLTPROC=xsltproc --nonet' 'CFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security -fno-omit-frame-pointer' \ + 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-z,now' '--with-gssapi' '--with-ldap' \ + 'build_alias=x86_64-linux-gnu' 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' \ + 'CXXFLAGS=-g -O2 -fstack-protector-strong -Wformat -Werror=format-security' + make world + sudo make install-world - name: Start postgresql cluster run: | - export PATH="/opt/pgsql/bin:$PATH" - /opt/pgsql/bin/initdb -D /opt/pgsql/data - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start + /usr/lib/postgresql/14/bin/initdb -D /opt/pgsql/data + /usr/lib/postgresql/14/bin/pg_ctl -D /opt/pgsql/data -l logfile start + + - name: Clone pg_stat_monitor repository + uses: actions/checkout@v2 + with: + path: 'src/pg_stat_monitor' - name: Build pg_stat_monitor run: | - export PATH="/opt/pgsql/bin:$PATH" - sudo cp /opt/pgsql/bin/pg_config /usr/bin + export PATH="/usr/lib/postgresql/14/bin:$PATH" + sudo cp /usr/lib/postgresql/14/bin/pg_config /usr/bin make USE_PGXS=1 - make USE_PGXS=1 install + sudo make USE_PGXS=1 install working-directory: src/pg_stat_monitor/ + - name: Load pg_stat_monitor library and Restart Server + run: | + /usr/lib/postgresql/14/bin/pg_ctl -D /opt/pgsql/data -l logfile stop + echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf + /usr/lib/postgresql/14/bin/pg_ctl -D /opt/pgsql/data -l logfile start + working-directory: src/pg_stat_monitor/ - name: Start pg_stat_monitor_tests run: | - 
/opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile stop - echo "shared_preload_libraries = 'pg_stat_monitor'" >> /opt/pgsql/data/postgresql.conf - /opt/pgsql/bin/pg_ctl -D /opt/pgsql/data -l logfile start make installcheck working-directory: src/pg_stat_monitor/ - - name: Report on test fail + - name: Report on pg_stat_monitor test fail uses: actions/upload-artifact@v2 if: ${{ failure() }} with: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..78ec34d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,119 @@ +# Contributing guide + +Welcome to `pg_stat_monitor` - the Query Performance Monitoring tool for PostgreSQL! + +We're glad that you would like to become a Percona community member and participate in keeping open source open. + +You can contribute in one of the following ways: + +1. Reach us on our [Forums](https://forums.percona.com/) and Discord. +2. [Submit a bug report or a feature request](#submit-a-bug-report-or-a-feature-request) +3. [Submit a pull request (PR) with the code patch](#submit-a-pull-request) +4. [Contribute to documentation](#contributing-to-documentation) + +By contributing, you agree to the [Percona Community code of conduct](https://github.com/percona/community/blob/main/content/contribute/coc.md). + + +## Submit a bug report or a feature request + +All bug reports, enhancements and feature requests are tracked in [Jira issue tracker](https://jira.percona.com/projects/PG). If you would like to suggest a new feature / an improvement or you found a bug in `pg_stat_monitor`, please submit the report to the [PG project](https://jira.percona.com/projects/PG/issues). + +Start by searching the open tickets for a similar report. If you find that someone else has already reported your issue, then you can upvote that report to increase its visibility. + +If there is no existing report, submit your report following these steps: + +1. Sign in to [Jira issue tracker](https://jira.percona.com/projects/PG/issues). 
You will need to create an account if you do not have one. +2. In the _Summary_, _Description_, _Steps To Reproduce_, _Affects Version_ fields describe the problem you have detected or an idea that you have for a new feature or improvement. +3. As a general rule of thumb, try to create bug reports that are: + + * Reproducible: describe the steps to reproduce the problem. + * Unique: check if there already exists a JIRA ticket to describe the problem. + * Scoped to a Single Bug: only report one bug in one JIRA ticket. + +## Submit a pull request + +Though not mandatory, we encourage you to first check for a bug report among Jira issues and in the PR list: perhaps the bug has already been addressed. + +For feature requests and enhancements, we do ask you to create a Jira issue, describe your idea and discuss the design with us. This way we align your ideas with our vision for the product development. + +If the bug hasn’t been reported / addressed, or we’ve agreed on the enhancement implementation with you, do the following: + +1. [Fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) this repository +2. Clone this repository on your machine. +3. Create a separate branch for your changes. If you work on a Jira issue, please include the issue number in the branch name so it reads as `PG-1234-my_branch`. This makes it easier to track your contribution. +4. Make your changes. Please follow the guidelines outlined in the [PostgreSQL Coding Standard](https://www.postgresql.org/docs/current/source.html) to improve code readability. +5. Test your changes locally. See the [Running tests](#running-tests) section for more information. +6. Update the documentation describing your changes. See the [Contributing to documentation](#contributing-to-documentation) section for details. +8. Commit the changes. Add the Jira issue number at the beginning of your message subject, so that it reads as `PG-1234: My commit message`. 
Follow this pattern for your commits: + + ``` + PG-1234: Main commit message. + + Details of fix. + ``` + + The [commit message guidelines](https://gist.github.com/robertpainsi/b632364184e70900af4ab688decf6f53) will help you with writing great commit messages. + +9. Open a pull request to Percona +10. Our team will review your code and if everything is correct, will merge it. Otherwise, we will contact you for additional information or with the request to make changes. +11. Make sure your pull request contains only one commit message. + +### Building pg_stat_monitor + +To build `pg_stat_monitor` from source code, you require the following: + +* git +* make + +Refer to the [Building from source code](https://github.com/percona/pg_stat_monitor#installing-from-source-code) section for guidelines. + + +### Running tests + +When you work, you should periodically run tests to check that your changes don’t break existing code. + +You can find the tests in the `regression` directory. + +#### Run manually + +1. Change directory to pg_stat_monitor + +**NOTE**: Make sure `postgres` user is the owner of the `pg_stat_monitor` directory + +2. Start the tests + 1. If you built PostgreSQL from PGDG, use the following command: + + ```sh + make installcheck + ``` + + + 2. If you installed PostgreSQL server from Percona Distribution for PostgreSQL, use the following command: + + ```sh + sudo su postgres bash -c 'make installcheck USE_PGXS=1' + ``` +#### Run automatically + +The tests are run automatically with GitHub Actions once you commit and push your changes. Make sure all tests pass successfully before you proceed. + + +## Contributing to documentation + +`pg_stat_monitor` documentation is written in Markdown language, so you can +[edit it online via GitHub](#edit-documentation-online-via-github). Alternatively, you can include doc changes in your patch. The doc files are in the `docs` directory. + +### Edit documentation online via GitHub + +1. 
Click the **Edit this page** link on the sidebar. The source `.md` file of the page opens in GitHub editor in your browser. If you haven’t worked with the repository before, GitHub creates a [fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) of it for you. +2. Edit the page. You can check your changes on the **Preview** tab. +3. Commit your changes. + * In the _Commit changes_ section, describe your changes. + * Select the **Create a new branch for this commit** and start a pull request option + * Click **Propose changes**. +4. GitHub creates a branch and a commit for your changes. It loads a new page on which you can open a pull request to Percona. The page shows the base branch - the one you offer your changes for, your commit message and a diff - a visual representation of your changes against the original page. This allows you to make a last-minute review. When you are ready, click the Create pull request button. +5. Someone from our team reviews the pull request and if everything is correct, merges it into the documentation. Then it gets published on the site. + +## After your pull request is merged + +Once your pull request is merged, you are an official Percona Community Contributor. Welcome to the community! diff --git a/META.json b/META.json index fcb66e0..0397750 100644 --- a/META.json +++ b/META.json @@ -1,8 +1,8 @@ { "name": "pg_stat_monitor", "abstract": "PostgreSQL Query Performance Monitoring Tool", - "description": "The pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL's contrib module pg_stat_statements. PostgreSQL’s pg_stat_statements provides the basic statistics, which is sometimes not enough. The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. 
In this case, a user needs to calculate the aggregate which is quite expensive.", - "version": "0.7.1", + "description": "pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL's contrib module pg_stat_statements. PostgreSQL’s pg_stat_statements provides the basic statistics, which is sometimes not enough. The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user would need to calculate the aggregates, which is quite an expensive operation.", + "version": "0.9.2-beta1", "maintainer": [ "ibrar.ahmed@percona.com" ], @@ -12,7 +12,7 @@ "abstract": "PostgreSQL Query Performance Monitoring Tool", "file": "pg_stat_monitor--1.0.sql", "docfile": "README.md", - "version": "0.7.1" + "version": "0.9.2-beta1" } }, "prereqs": { diff --git a/README.md b/README.md index 263ea19..8beb28d 100644 --- a/README.md +++ b/README.md @@ -1,114 +1,200 @@ -![pg11-test](https://github.com/percona/pg_stat_monitor/workflows/pg11-test/badge.svg) -![pg12-test](https://github.com/percona/pg_stat_monitor/workflows/pg12-test/badge.svg) -![pg13-test](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) -![pg14-test](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) +![PostgreSQL-11](https://github.com/percona/pg_stat_monitor/workflows/pg11-test/badge.svg) +![PostgreSQL-12](https://github.com/percona/pg_stat_monitor/workflows/pg12-test/badge.svg) +![PostgreSQL-13](https://github.com/percona/pg_stat_monitor/workflows/pg13-test/badge.svg) +![PostgreSQL-14](https://github.com/percona/pg_stat_monitor/workflows/pg14-test/badge.svg) +![PostgreSQL-11-Package](https://github.com/percona/pg_stat_monitor/workflows/pg11package-test/badge.svg) +![PostgreSQL-12-Packages](https://github.com/percona/pg_stat_monitor/workflows/pg12package-test/badge.svg) 
+![PostgreSQL-13-Packages](https://github.com/percona/pg_stat_monitor/workflows/pg13package-test/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/percona/pg_stat_monitor/badge.svg)](https://coveralls.io/github/percona/pg_stat_monitor) -## What is pg_stat_monitor? +# pg_stat_monitor: Query Performance Monitoring Tool for PostgreSQL -The **pg_stat_monitor** is a **Query Performance Monitoring** tool for [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL. **pg_stat_monitor** is based on PostgreSQL's contrib module ``pg_stat_statements``. pg_stat_statements provides the basic statistics, which is sometimes not enough. The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user needs to calculate the aggregate which is quite expensive. +## Table of Contents -**pg_stat_monitor** is developed on the basis of pg_stat_statements as its more advanced replacement. It provides all the features of pg_stat_statements plus its own feature set. +* [Overview](#overview) +* [Supported versions](#supported-versions) +* [Features](#features) +* [Documentation](#documentation) +* [Supported platforms](#supported-platforms) +* [Installation guidelines](#installation-guidelines) +* [Configuration](#configuration) +* [Setup](#setup) +* [Building from source code](#building-from-source) +* [How to contribute](#how-to-contribute) +* [License](#license) +* [Copyright](#copyright) -### How pg_stat_monitor works? +## Overview -``pg_stat_monitor`` accumulates the information in the form of buckets. All the aggregated information is bucket based. The size of a bucket and the number of buckets should be configured using GUC (Grand Unified Configuration). When a bucket time elapses, ``pg_stat_monitor`` resets all the statistics and switches to the next bucket. 
After the last bucket elapses, ``pg_stat_monitor`` goes back to the first bucket. All the data on the first bucket is cleared out with new writes; therefore, to not lose the data, users must read the buckets before that. +**NOTE**: This is a beta release and is subject to further changes. We recommend using it in testing environments only. -## Documentation -1. [Supported PostgreSQL Versions](#supported-postgresql-versions) -2. [Installation](#installation) -3. [Setup](#setup) -4. [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md) -6. [Release Notes](https://github.com/percona/pg_stat_monitor/blob/master/docs/RELEASE_NOTES.md) -7. [License](https://github.com/percona/pg_stat_monitor/blob/master/LICENSE) -8. [Submitting Bug Reports](#submitting-bug-reports) -9. [Copyright Notice](#copyright-notice) +The `pg_stat_monitor` is a **_Query Performance Monitoring_** tool for PostgreSQL. It attempts to provide a more holistic picture by providing much-needed query performance insights in a single view. -## Supported PostgreSQL Versions -The ``pg_stat_monitor`` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL but is only tested with these versions: +`pg_stat_monitor` provides improved insights that allow database users to understand query origins, execution, planning statistics and details, query information, and metadata. This significantly improves observability, enabling users to debug and tune query performance. `pg_stat_monitor` is developed on the basis of `pg_stat_statements` as its more advanced replacement. 
-| Distribution | Version | Supported | -| ------------------------------------|---------|--------------------| -| PostgreSQL | < 11 | :x: | -| PostgreSQL | 11 | :heavy_check_mark: | -| PostgreSQL | 12 | :heavy_check_mark: | -| PostgreSQL | 13 | :heavy_check_mark: | -| Percona Distribution for PostgreSQL | < 11 | :x: | -| [Percona Distribution for PostgreSQL](https://www.percona.com/downloads/percona-postgresql-11/) | 11 | :heavy_check_mark: | -| [Percona Distribution for PostgreSQL](https://www.percona.com/downloads/percona-postgresql-12/) | 12 | :heavy_check_mark: | -| [Percona Distribution for PostgreSQL](https://www.percona.com/downloads/percona-postgresql-13/) | 13 | :heavy_check_mark: | +While `pg_stat_statements` provides ever-increasing metrics, `pg_stat_monitor` aggregates the collected data, saving user efforts for doing it themselves. `pg_stat_monitor` stores statistics in configurable time-based units – buckets. This allows focusing on statistics generated for shorter time periods and makes query timing information such as max/min/mean time more accurate. -## Installation +To learn about other features, available in `pg_stat_monitor`, see the [Features](#pg_stat_monitor-features) section and the [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). -You can install ``pg_stat_monitor`` from [Percona repositories](#installing-from-percona-repositories) and from [source code](#installing-from-source-code). +`pg_stat_monitor` supports PostgreSQL versions 11 and above. It is compatible with both PostgreSQL provided by PostgreSQL Global Development Group (PGDG) and [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution). 
-### Installing from Percona repositories +The RPM (for RHEL and CentOS) and the DEB (for Debian and Ubuntu) packages are available from Percona repositories for PostgreSQL versions [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/), and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/). -``pg_stat_monitor`` is supplied as part of Percona Distribution for PostgreSQL. The rpm/deb packages are available from Percona repositories. To install ``pg_stat_monitor``, follow [the installation instructions](https://www.percona.com/doc/postgresql/LATEST/installing.html). +The RPM packages are also available in the official PostgreSQL (PGDG) yum repositories. -### Installing from PGXN +### Supported versions -You can install ``pg_stat_monitor`` from PGXN (PostgreSQL Extensions Network) using the [PGXN client](https://pgxn.github.io/pgxnclient/). +The `pg_stat_monitor` should work on the latest version of both [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) and PostgreSQL, but is only tested with these versions: +| **Distribution** | **Version** | **Provider** | +| ---------------- | --------------- | ------------ | +|[Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution)| [11](https://www.percona.com/downloads/percona-postgresql-11/LATEST/), [12](https://www.percona.com/downloads/postgresql-distribution-12/LATEST/) and [13](https://www.percona.com/downloads/postgresql-distribution-13/LATEST/)| Percona| +| PostgreSQL | 11, 12, and 13 | PostgreSQL Global Development Group (PGDG) | + + +### Features + +`pg_stat_monitor` simplifies query observability by providing a more holistic view of query from performance, application and analysis perspectives. 
This is achieved by grouping data in configurable time buckets that allow capturing of load and performance information for smaller time windows. So performance issues and patterns can be identified based on time and workload. + + +* **Time Interval Grouping:** Instead of supplying one set of ever-increasing counts, `pg_stat_monitor` computes stats for a configured number of time intervals - time buckets. This allows for much better data accuracy, especially in the case of high resolution or unreliable networks. +* **Multi-Dimensional Grouping:** While `pg_stat_statements` groups counters by userid, dbid, queryid, `pg_stat_monitor` uses a more detailed group for higher precision. This allows a user to drill down into the performance of queries. +* **Capture Actual Parameters in the Queries:** `pg_stat_monitor` allows you to choose if you want to see queries with placeholders for parameters or actual parameter data. This simplifies debugging and analysis processes by enabling users to execute the same query. +* **Query Plan:** Each SQL is now accompanied by its actual plan that was constructed for its execution. That’s a huge advantage if you want to understand why a particular query is slower than expected. +* **Tables Access Statistics for a Statement:** This allows us to easily identify all queries that accessed a given table. This set is at par with the information provided by the `pg_stat_statements`. +* **Histogram:** Visual representation is very helpful as it can help identify issues. With the help of the histogram function, one can now view a timing/calling data histogram in response to an SQL query. And yes, it even works in psql. + + +### Documentation + +1. [User guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md) +2. pg_stat_monitor vs pg_stat_statements +3. pg_stat_monitor view reference +4. [Release notes](https://github.com/percona/pg_stat_monitor/blob/master/docs/RELEASE_NOTES.md) +5. 
[Contributing guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) + + +### Supported platforms + +The PostgreSQL YUM repository supports `pg_stat_monitor` for all [supported versions](#supported-versions) for the following platforms: + +* Red Hat Enterprise/Rocky/CentOS/Oracle Linux 7 and 8 +* Fedora 33 and 34 + +Find the list of supported platforms for `pg_stat_monitor` within [Percona Distribution for PostgreSQL](https://www.percona.com/software/postgresql-distribution) on the [Percona Release Lifecycle Overview](https://www.percona.com/services/policies/percona-software-support-lifecycle#pgsql) page. + + +### Installation Guidelines + +You can install `pg_stat_monitor` from the following sources: + +* [Percona repositories](#installing-from-percona-repositories), +* [PostgreSQL PGDG yum repositories](#installing-from-postgresql-yum-repositories), +* [PGXN](#installing-from-pgxn) and +* [source code](#building-from-source). + + +#### Installing from Percona repositories + +To install `pg_stat_monitor` from Percona repositories, you need to use the `percona-release` repository management tool. + +1. [Install percona-release](https://www.percona.com/doc/percona-repo-config/installing.html) following the instructions relevant to your operating system +2. Enable Percona repository: + +``` sh +percona-release setup ppgXX +``` + +Replace XX with the desired PostgreSQL version. For example, to install `pg_stat_monitor` for PostgreSQL 13, specify `ppg13`. + +3. Install `pg_stat_monitor` package + * For Debian and Ubuntu: + ``` sh + apt-get install percona-pg-stat-monitor13 + ``` + * For RHEL and CentOS: + ``` sh + yum install percona-pg-stat-monitor13 + ``` + +#### Installing from PostgreSQL yum repositories + +Install the PostgreSQL repositories following the instructions in the [Linux downloads (Red Hat family)](https://www.postgresql.org/download/linux/redhat/) chapter in PostgreSQL documentation. 
+ +Install `pg_stat_monitor`: + +``` +dnf install -y pg_stat_monitor_<VERSION> +``` + +Replace the `VERSION` variable with the PostgreSQL version you are using (e.g. specify `pg_stat_monitor_13` for PostgreSQL 13). + + +#### Installing from PGXN + +You can install `pg_stat_monitor` from PGXN (PostgreSQL Extensions Network) using the [PGXN client](https://pgxn.github.io/pgxnclient/). Use the following command: -```sh +``` pgxn install pg_stat_monitor ``` -### Installing from source code +### Configuration -You can download the source code of the latest release of ``pg_stat_monitor`` from [the releases page on GitHub](https://github.com/Percona/pg_stat_monitor/releases) or using git: -```sh -git clone git://github.com/Percona/pg_stat_monitor.git -``` + +You can find the configuration parameters of the `pg_stat_monitor` extension in the `pg_stat_monitor_settings` view. To change the default configuration, specify new values for the desired parameters using the GUC (Grand Unified Configuration) system. To learn more, refer to the [Configuration](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md#configuration) section of the user guide. -Compile and install the extension -```sh -cd pg_stat_monitor -make USE_PGXS=1 -make USE_PGXS=1 install -``` -## Setup -``pg_stat_monitor`` cannot be enabled in your running ``postgresql`` instance. ``pg_stat_monitor`` needs to be loaded at the start time. This requires adding the ``pg_stat_monitor`` extension for the ``shared_preload_libraries`` parameter and restarting the ``postgresql`` instance. +### Setup -You can set the ``pg_stat_monitor`` extension in the ``postgresql.conf`` file. +You can enable `pg_stat_monitor` when your `postgresql` instance is not running. + +`pg_stat_monitor` needs to be loaded at the start time. The extension requires additional shared memory; therefore, add the `pg_stat_monitor` value for the `shared_preload_libraries` parameter and restart the `postgresql` instance. 
+ +Use the [ALTER SYSTEM](https://www.postgresql.org/docs/current/sql-altersystem.html)command from `psql` terminal to modify the `shared_preload_libraries` parameter. ``` -# - Shared Library Preloading - - -shared_preload_libraries = 'pg_stat_monitor' # (change requires restart) -#local_preload_libraries = '' -#session_preload_libraries = '' -``` - -Or you can set it from `psql` terminal using the ``ALTER SYSTEM`` command. - -```sql ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_monitor'; + ALTER SYSTEM ``` -```sh +**NOTE**: If you’ve added other values to the shared_preload_libraries parameter, list all of them separated by commas for the `ALTER SYSTEM` command. For example, `ALTER SYSTEM SET shared_preload_libraries = 'foo, bar, pg_stat_monitor'` + +Start or restart the `postgresql` instance to apply the changes. + +* On Debian and Ubuntu: + +``` +sudo systemctl restart postgresql.service +``` + +* On Red Hat Enterprise Linux and CentOS: + + +``` sudo systemctl restart postgresql-13 ``` +Create the extension using the [CREATE EXTENSION](https://www.postgresql.org/docs/current/sql-createextension.html) command. Using this command requires the privileges of a superuser or a database owner. Connect to `psql` as a superuser for a database and run the following command: -Create the extension using the ``CREATE EXTENSION`` command. -```sql + +``` CREATE EXTENSION pg_stat_monitor; CREATE EXTENSION ``` -```sql + +This allows you to see the stats collected by `pg_stat_monitor`. + + +``` -- Select some of the query information, like client_ip, username and application_name etc. 
-postgres=# SELECT application_name, userid AS user_name, datname AS database_name, substr(query,0, 50) AS query, calls, client_ip +postgres=# SELECT application_name, userid AS user_name, datname AS database_name, substr(query,0, 50) AS query, calls, client_ip FROM pg_stat_monitor; - application_name | user_name | database_name | query | calls | client_ip + application_name | user_name | database_name | query | calls | client_ip ------------------+-----------+---------------+---------------------------------------------------+-------+----------- psql | vagrant | postgres | SELECT application_name, userid::regrole AS user_ | 1 | 127.0.0.1 psql | vagrant | postgres | SELECT application_name, userid AS user_name, dat | 3 | 127.0.0.1 @@ -116,51 +202,47 @@ postgres=# SELECT application_name, userid AS user_name, datname AS database_nam psql | vagrant | postgres | SELECT application_name, userid AS user_name, dat | 8 | 127.0.0.1 psql | vagrant | postgres | SELECT bucket, substr(query,$1, $2) AS query, cmd | 1 | 127.0.0.1 (5 rows) +``` + +To learn more about `pg_stat_monitor` features and usage, see [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). To view all other data elements provided by `pg_stat_monitor`, please see the reference. + + +### Building from source + +You can download the source code of the latest release of `pg_stat_monitor` from [the releases page on GitHub](https://github.com/Percona/pg_stat_monitor/releases) or using git: ``` - -```sql --- Select queries along with elevel, message and sqlcode which have some errors. - -SELECT decode_error_level(elevel) AS elevel, sqlcode, query, message FROM pg_stat_monitor WHERE elevel != 0; - elevel. 
| sqlcode | query | message ---------------------+---------+-------------------------------------------------------------------------------------------+------------------------------------------------ - ERROR | 132 | select count(*) from pgbench_branches | permission denied for table pgbench_branches - ERROR | 130 | select 1/0; | division by zero - ERROR | 132 | SELECT decode_elevel(elevel), sqlcode, message from pg_stat_monitor where elevel != 0; | function decode_elevel(integer) does not exist - ERROR | 132 | drop table if exists pgbench_accounts, pgbench_branches, pgbench_history, pgbench_tellers | must be owner of table pgbench_accounts -(4 rows) - +git clone git://github.com/Percona/pg_stat_monitor.git ``` -To learn more about ``pg_stat_monitor`` configuration and usage, see [User Guide](https://github.com/percona/pg_stat_monitor/blob/master/docs/USER_GUIDE.md). +Compile and install the extension -## Submitting Bug Reports +``` +cd pg_stat_monitor +make USE_PGXS=1 +make USE_PGXS=1 install +``` -If you found a bug in ``pg_stat_monitor``, please submit the report to the [Jira issue tracker](https://jira.percona.com/projects/PG/issues). +### How to contribute -Start by searching the open tickets for a similar report. If you find that someone else has already reported your issue, then you can upvote that report to increase its visibility. +We welcome and strongly encourage community participation and contributions, and are always looking for new members that are as dedicated to serving the community as we are. -If there is no existing report, submit your report following these steps: - -1. Sign in to [Jira issue tracker](https://jira.percona.com/projects/PG/issues). You will need to create an account if you do not have one. - -2. In the *Summary*, *Description*, *Steps To Reproduce*, *Affects Version* fields describe the problem you have detected. - -3. 
As a general rule of thumb, try to create bug reports that are: - -- Reproducible: describe the steps to reproduce the problem. - -- Unique: check if there already exists a JIRA ticket to describe the problem. - -- Scoped to a Single Bug: only report one bug in one JIRA ticket. +The [Contributing Guide](https://github.com/percona/pg_stat_monitor/blob/master/CONTRIBUTING.md) contains the guidelines on how you can contribute. -## Copyright Notice +### Support, discussions and forums -Portions Copyright © 2018-2021, Percona LLC and/or its affiliates +We welcome your feedback on your experience with `pg_stat_monitor`. Join our [technical forum](https://forums.percona.com/) or [Discord](https://discord.gg/mQEyGPkNbR) channel for help with `pg_stat_monitor` and Percona's open source software for MySQL®, [PostgreSQL](https://www.percona.com/software/postgresql-distribution), and MongoDB® databases. -Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group -Portions Copyright (c) 1994, The Regents of the University of California +### License + +This project is licensed under the same open liberal terms and conditions as the PostgreSQL project itself. Please refer to the [LICENSE](https://github.com/percona/pg_stat_monitor/blob/master/LICENSE) file for more details. + + +### Copyright notice + +* Portions Copyright © 2018-2021, Percona LLC and/or its affiliates +* Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group +* Portions Copyright (c) 1994, The Regents of the University of California diff --git a/code-of-conduct.md b/code-of-conduct.md new file mode 100644 index 0000000..6af3e44 --- /dev/null +++ b/code-of-conduct.md @@ -0,0 +1,5 @@ +# Percona Distribution for PostgreSQL Operator Code of Conduct + +All Percona Products follow the [Percona Community Code of Conduct](https://github.com/percona/community/blob/main/content/contribute/coc.md). + +If you notice any unacceptable behavior, let us know as soon as possible by writing to . 
We will respond within 48 hours. diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index a671e1d..da9a7db 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -30,6 +30,7 @@ The following are the key features of pg_stat_monitor: * [Integration with Percona Monitoring and Management (PMM) tool](#integration-with-pmm), * [Histograms](#histogram) - visual representation of query performance. + ### Time buckets Instead of supplying one set of ever-increasing counts, `pg_stat_monitor` computes stats for a configured number of time intervals; time buckets. This allows for much better data accuracy, especially in the case of high-resolution or unreliable networks. @@ -38,8 +39,10 @@ Instead of supplying one set of ever-increasing counts, `pg_stat_monitor` comput `pg_stat_monitor` collects the information about what tables were accessed by a statement. This allows you to identify all queries which access a given table easily. + ### Query and client information + `pg_stat_monitor` provides additional metrics for detailed analysis of query performance from various perspectives, including client connection details like user name, application name, IP address to name a few relevant columns. With this information, `pg_stat_monitor` enables users to track a query to the originating application. More details about the application or query may be incorporated in the SQL query in a [Google’s Sqlcommenter](https://google.github.io/sqlcommenter/) format. @@ -47,6 +50,7 @@ With this information, `pg_stat_monitor` enables users to track a query to the o Understanding query execution time stats helps you identify what affects query performance and take measures to optimize it. `pg_stat_monitor` collects the total, min, max and average (mean) time it took to execute a particular query and provides this data in separate columns. See the [Query timing information](#usage-examples-query-timing-information) example for the sample output. 
+ ### Query execution plan information Every query has a plan that was constructed for its executing. Collecting the query plan information as well as monitoring query plan timing helps you understand how you can modify the query to optimize its execution. It also helps make communication about the query clearer when discussing query performance with other DBAs and application developers. @@ -180,6 +184,7 @@ The following table shows setup options for each configuration parameter and whe #### Parameters description: + ##### pg_stat_monitor.pgsm_max Values: diff --git a/guc.c b/guc.c index 5d16b33..52db4e3 100644 --- a/guc.c +++ b/guc.c @@ -104,7 +104,7 @@ init_guc(void) conf[i] = (GucVariable) { .guc_name = "pg_stat_monitor.pgsm_bucket_time", .guc_desc = "Sets the time in seconds per bucket.", - .guc_default = 300, + .guc_default = 60, .guc_min = 1, .guc_max = INT_MAX, .guc_restart = true, diff --git a/hash_query.c b/hash_query.c index 55d1cde..29d0cc8 100644 --- a/hash_query.c +++ b/hash_query.c @@ -15,14 +15,26 @@ *------------------------------------------------------------------------- */ #include "postgres.h" +#include "nodes/pg_list.h" #include "pg_stat_monitor.h" + static pgssSharedState *pgss; static HTAB *pgss_hash; -static HTAB *pgss_query_hash; static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size); +/* + * Copy query from src_buffer to dst_buff. + * Use query_id and query_pos to fast locate query in source buffer. + * Store updated query position in the destination buffer into param query_pos. 
+ */ +static bool copy_query(uint64 bucket_id, + uint64 query_id, + uint64 query_pos, + unsigned char *dst_buf, + unsigned char *src_buf, + size_t *new_query_pos); static HTAB* hash_init(const char *hash_name, int key_size, int entry_size, int hash_size) @@ -44,7 +56,6 @@ pgss_startup(void) pgss = NULL; pgss_hash = NULL; - pgss_query_hash = NULL; /* * Create or attach to the shared memory state, including hash table @@ -60,6 +71,10 @@ pgss_startup(void) ResetSharedState(pgss); } +#ifdef BENCHMARK + init_hook_stats(); +#endif + pgss->query_buf_size_bucket = MAX_QUERY_BUF / PGSM_MAX_BUCKETS; for (i = 0; i < PGSM_MAX_BUCKETS; i++) @@ -70,7 +85,6 @@ pgss_startup(void) } pgss_hash = hash_init("pg_stat_monitor: bucket hashtable", sizeof(pgssHashKey), sizeof(pgssEntry), MAX_BUCKET_ENTRIES); - pgss_query_hash = hash_init("pg_stat_monitor: query hashtable", sizeof(pgssQueryHashKey), sizeof(pgssQueryEntry),MAX_BUCKET_ENTRIES); LWLockRelease(AddinShmemInitLock); @@ -93,12 +107,6 @@ pgsm_get_hash(void) return pgss_hash; } -HTAB* -pgsm_get_query_hash(void) -{ - return pgss_query_hash; -} - /* * shmem_shutdown hook: Dump statistics into file. 
* @@ -132,7 +140,7 @@ hash_memsize(void) } pgssEntry * -hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key,int encoding) +hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding) { pgssEntry *entry = NULL; bool found = false; @@ -143,10 +151,10 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key,int encoding) return NULL; } /* Find or create an entry with desired hash code */ - entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found); + entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER_NULL, &found); if (!found) { - pgss->bucket_entry[pgss->current_wbucket]++; + pgss->bucket_entry[pg_atomic_read_u64(&pgss->current_wbucket)]++; /* New entry, initialize it */ /* reset the statistics */ memset(&entry->counters, 0, sizeof(Counters)); @@ -160,89 +168,119 @@ hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key,int encoding) elog(DEBUG1, "%s", "pg_stat_monitor: out of memory"); return entry; } + /* - * Reset all the entries. + * Prepare resources for using the new bucket: + * - Deallocate finished hash table entries in new_bucket_id (entries whose + * state is PGSS_FINISHED or PGSS_FINISHED). + * - Clear query buffer for new_bucket_id. + * - If old_bucket_id != -1, move all pending hash table entries in + * old_bucket_id to the new bucket id, also move pending queries from the + * previous query buffer (query_buffer[old_bucket_id]) to the new one + * (query_buffer[new_bucket_id]). * * Caller must hold an exclusive lock on pgss->lock. */ void -hash_query_entryies_reset() -{ - HASH_SEQ_STATUS hash_seq; - pgssQueryEntry *entry; - - hash_seq_init(&hash_seq, pgss_query_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) - entry = hash_search(pgss_query_hash, &entry->key, HASH_REMOVE, NULL); -} - - -/* - * Deallocate finished entries. - * - * Caller must hold an exclusive lock on pgss->lock. 
- */ -void -hash_query_entry_dealloc(int bucket, unsigned char *buf) -{ - HASH_SEQ_STATUS hash_seq; - pgssQueryEntry *entry; - unsigned char *old_buf; - pgssSharedState *pgss = pgsm_get_ss(); - - old_buf = palloc0(pgss->query_buf_size_bucket); - memcpy(old_buf, buf, pgss->query_buf_size_bucket); - - memset(buf, 0, pgss->query_buf_size_bucket); - - hash_seq_init(&hash_seq, pgss_query_hash); - while ((entry = hash_seq_search(&hash_seq)) != NULL) - { - if (entry->key.bucket_id == bucket) - { - if (entry->state == PGSS_FINISHED || entry->state == PGSS_ERROR) - { - entry = hash_search(pgss_query_hash, &entry->key, HASH_REMOVE, NULL); - } - else - { - int len; - char query_txt[1024]; - if (read_query(old_buf, entry->key.bucket_id, entry->key.queryid, query_txt) == 0) - { - len = read_query_buffer(entry->key.bucket_id, entry->key.queryid, query_txt); - if (len != MAX_QUERY_BUFFER_BUCKET) - snprintf(query_txt, 32, "%s", ""); - } - SaveQueryText(entry->key.bucket_id, entry->key.queryid, buf, query_txt, strlen(query_txt)); - } - } - } - pfree(old_buf); -} - -/* - * Deallocate least-used entries. - * - * Caller must hold an exclusive lock on pgss->lock. - */ -bool -hash_entry_dealloc(int bucket) +hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]) { HASH_SEQ_STATUS hash_seq; pgssEntry *entry = NULL; + pgssSharedState *pgss = pgsm_get_ss(); + /* Store pending query ids from the previous bucket. */ + List *pending_entries = NIL; + ListCell *pending_entry; + + if (new_bucket_id != -1) + { + /* Clear all queries in the query buffer for the new bucket. */ + memset(query_buffer[new_bucket_id], 0, pgss->query_buf_size_bucket); + } + + /* Iterate over the hash table. */ hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { - if (bucket < 0 || - (entry->key.bucket_id == bucket && + /* + * Remove all entries if new_bucket_id == -1. + * Otherwise remove entry in new_bucket_id if it has finished already. 
+ */ + if (new_bucket_id < 0 || + (entry->key.bucket_id == new_bucket_id && (entry->counters.state == PGSS_FINISHED || entry->counters.state == PGSS_ERROR))) { entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } + + /* + * If we detect a pending query residing in the previous bucket id, + * we add it to a list of pending elements to be moved to the new + * bucket id. + * Can't update the hash table while iterating it inside this loop, + * as this may introduce all sort of problems. + */ + if (old_bucket_id != -1 && entry->key.bucket_id == old_bucket_id) + { + if (entry->counters.state == PGSS_PARSE || + entry->counters.state == PGSS_PLAN || + entry->counters.state == PGSS_EXEC) + { + pgssEntry *bkp_entry = malloc(sizeof(pgssEntry)); + if (!bkp_entry) + { + /* No memory, remove pending query entry from the previous bucket. */ + elog(ERROR, "hash_entry_dealloc: out of memory"); + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + continue; + } + + /* Save key/data from the previous entry. */ + memcpy(bkp_entry, entry, sizeof(pgssEntry)); + + /* Update key to use the new bucket id. */ + bkp_entry->key.bucket_id = new_bucket_id; + + /* Add the entry to a list of nodes to be processed later. */ + pending_entries = lappend(pending_entries, bkp_entry); + + /* Finally remove the pending query from the expired bucket id. */ + entry = hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); + } + } } - return true; + + /* + * Iterate over the list of pending queries in order + * to add them back to the hash table with the updated bucket id. + */ + foreach (pending_entry, pending_entries) { + bool found = false; + pgssEntry *new_entry; + pgssEntry *old_entry = (pgssEntry *) lfirst(pending_entry); + + new_entry = (pgssEntry *) hash_search(pgss_hash, &old_entry->key, HASH_ENTER_NULL, &found); + if (new_entry == NULL) + elog(DEBUG1, "%s", "pg_stat_monitor: out of memory"); + else if (!found) + { + /* Restore counters and other data. 
*/ + new_entry->counters = old_entry->counters; + SpinLockInit(&new_entry->mutex); + new_entry->encoding = old_entry->encoding; + /* copy query's text from previous bucket to the new one. */ + copy_query(new_bucket_id, + new_entry->key.queryid, /* query id */ + old_entry->query_pos, /* query position in buffer */ + query_buffer[new_bucket_id], /* destination query buffer */ + query_buffer[old_bucket_id], /* source query buffer */ + &new_entry->query_pos); /* position in which query was inserted into destination buffer */ + } + + free(old_entry); + } + + list_free(pending_entries); } /* @@ -262,49 +300,10 @@ hash_entry_reset() { hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL); } - pgss->current_wbucket = 0; + pg_atomic_write_u64(&pgss->current_wbucket, 0); LWLockRelease(pgss->lock); } -/* Caller must accuire lock */ -pgssQueryEntry* -hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid) -{ - pgssQueryHashKey key; - pgssQueryEntry *entry; - bool found; - - key.queryid = queryid; - key.bucket_id = bucket_id; - key.dbid = dbid; - key.userid = userid; - key.ip = ip; - key.appid = appid; - - entry = (pgssQueryEntry *) hash_search(pgss_query_hash, &key, HASH_ENTER, &found); - return entry; -} - -/* Caller must accuire lock */ -pgssQueryEntry* -hash_find_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid) -{ - pgssQueryHashKey key; - pgssQueryEntry *entry; - bool found; - - key.queryid = queryid; - key.bucket_id = bucket_id; - key.dbid = dbid; - key.userid = userid; - key.ip = ip; - key.appid = appid; - - /* Lookup the hash table entry with shared lock. 
*/ - entry = (pgssQueryEntry *) hash_search(pgss_query_hash, &key, HASH_FIND, &found); - return entry; -} - bool IsHashInitialize(void) { @@ -312,3 +311,38 @@ IsHashInitialize(void) pgss_hash != NULL); } +static bool copy_query(uint64 bucket_id, + uint64 query_id, + uint64 query_pos, + unsigned char *dst_buf, + unsigned char *src_buf, + size_t *new_query_pos) +{ + uint64 query_len = 0; + uint64 buf_len = 0; + + memcpy(&buf_len, src_buf, sizeof (uint64)); + if (buf_len <= 0) + return false; + + /* Try to locate the query directly. */ + if (query_pos != 0 && (query_pos + sizeof(uint64) + sizeof(uint64)) < buf_len) + { + if (*(uint64 *)&src_buf[query_pos] != query_id) + return false; + + query_pos += sizeof(uint64); + + memcpy(&query_len, &src_buf[query_pos], sizeof(uint64)); /* query len */ + query_pos += sizeof(uint64); + + if (query_pos + query_len > buf_len) /* avoid reading past buffer's length. */ + return false; + + return SaveQueryText(bucket_id, query_id, dst_buf, + (const char *)&src_buf[query_pos], + query_len, new_query_pos); + } + + return false; +} diff --git a/pg_stat_monitor--1.0.sql b/pg_stat_monitor--1.0.sql index 73153b5..67cb3c2 100644 --- a/pg_stat_monitor--1.0.sql +++ b/pg_stat_monitor--1.0.sql @@ -36,6 +36,7 @@ CREATE FUNCTION pg_stat_monitor_internal(IN showtext boolean, OUT query_plan text, OUT state_code int8, OUT top_queryid text, + OUT top_query text, OUT application_name text, OUT relations text, -- 11 @@ -148,7 +149,7 @@ CREATE VIEW pg_stat_monitor AS SELECT comments, planid, query_plan, - (SELECT query from pg_stat_monitor_internal(true) s where s.queryid = p.top_queryid) AS top_query, + top_query, application_name, string_to_array(relations, ',') AS relations, cmd_type, @@ -157,17 +158,17 @@ CREATE VIEW pg_stat_monitor AS SELECT sqlcode, message, calls, - round( CAST(total_time as numeric), 4)::float8 as total_time, - round( CAST(min_time as numeric), 4)::float8 as min_time, - round( CAST(max_time as numeric), 4)::float8 as max_time, - 
round( CAST(mean_time as numeric), 4)::float8 as mean_time, - round( CAST(stddev_time as numeric), 4)::float8 as stddev_time, + total_time, + min_time, + max_time, + mean_time, + stddev_time, rows_retrieved, plans_calls, - round( CAST(plan_total_time as numeric), 4)::float8 as plan_total_time, - round( CAST(plan_min_time as numeric), 4)::float8 as plan_min_time, - round( CAST(plan_max_time as numeric), 4)::float8 as plan_max_time, - round( CAST(plan_mean_time as numeric), 4)::float8 as plan_mean_time, + plan_total_time, + plan_min_time, + plan_max_time, + plan_mean_time, shared_blks_hit, shared_blks_read, @@ -182,8 +183,8 @@ CREATE VIEW pg_stat_monitor AS SELECT blk_read_time, blk_write_time, (string_to_array(resp_calls, ',')) resp_calls, - round(cpu_user_time::numeric, 4) as cpu_user_time, - round(cpu_sys_time::numeric, 4) as cpu_sys_time, + cpu_user_time, + cpu_sys_time, wal_records, wal_fpi, wal_bytes, @@ -227,6 +228,27 @@ end loop; END $$ language plpgsql; +CREATE FUNCTION pg_stat_monitor_hook_stats( + OUT hook text, + OUT min_time float8, + OUT max_time float8, + OUT total_time float8, + OUT ncalls int8 +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_monitor_hook_stats' +LANGUAGE C STRICT VOLATILE PARALLEL SAFE; + +CREATE VIEW pg_stat_monitor_hook_stats AS SELECT + hook, + min_time, + max_time, + total_time, + total_time / greatest(ncalls, 1) as avg_time, + ncalls, + ROUND(CAST(total_time / greatest(sum(total_time) OVER(), 0.00000001) * 100 as numeric), 2)::text || '%' as load_comparison +FROM pg_stat_monitor_hook_stats(); + GRANT SELECT ON pg_stat_monitor TO PUBLIC; GRANT SELECT ON pg_stat_monitor_settings TO PUBLIC; -- Don't want this to be available to non-superusers. 
diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c index 6f651a9..5fb38eb 100644 --- a/pg_stat_monitor.c +++ b/pg_stat_monitor.c @@ -14,38 +14,35 @@ * *------------------------------------------------------------------------- */ + #include "postgres.h" +#include "access/parallel.h" #include +#ifdef BENCHMARK +#include /* clock() */ +#endif #include "commands/explain.h" #include "pg_stat_monitor.h" PG_MODULE_MAGIC; -#define BUILD_VERSION "devel" -#define PG_STAT_STATEMENTS_COLS 52 /* maximum of above */ +#define BUILD_VERSION "1.0.0-beta-2" +#define PG_STAT_STATEMENTS_COLS 53 /* maximum of above */ #define PGSM_TEXT_FILE "/tmp/pg_stat_monitor_query" +#define roundf(x,d) ((floor(((x)*pow(10,d))+.5))/pow(10,d)) + #define PGUNSIXBIT(val) (((val) & 0x3F) + '0') #define _snprintf(_str_dst, _str_src, _len, _max_len)\ -do \ -{ \ - int i; \ - for(i = 0; i < _len && i < _max_len; i++) \ - {\ - _str_dst[i] = _str_src[i]; \ - }\ -}while(0) + memcpy((void *)_str_dst, _str_src, _len < _max_len ? _len : _max_len) #define _snprintf2(_str_dst, _str_src, _len1, _len2)\ do \ { \ - int i,j; \ + int i; \ for(i = 0; i < _len1; i++) \ - for(j = 0; j < _len2; j++) \ - { \ - _str_dst[i][j] = _str_src[i][j]; \ - } \ + strlcpy((char *)_str_dst[i], _str_src[i], _len2); \ }while(0) /*---- Initicalization Function Declarations ----*/ @@ -63,14 +60,20 @@ static int plan_nested_level = 0; /* The array to store outer layer query id*/ uint64 *nested_queryids; -FILE *qfile; +/* Regex object used to extract query comments. 
*/ +static regex_t preg_query_comments; +static char relations[REL_LST][REL_LEN]; +static int num_relations; /* Number of relation in the query */ static bool system_init = false; static struct rusage rusage_start; static struct rusage rusage_end; static unsigned char *pgss_qbuf[MAX_BUCKETS]; static char *pgss_explain(QueryDesc *queryDesc); +#ifdef BENCHMARK +static struct pg_hook_stats_t *pg_hook_stats; +#endif -static char *extract_query_comments(const char *query); +static void extract_query_comments(const char *query, char *comments, size_t max_len); static int get_histogram_bucket(double q_time); static bool IsSystemInitialized(void); static void dump_queries_buffer(int bucket_id, unsigned char *buf, int buf_len); @@ -89,17 +92,16 @@ static ExecutorFinish_hook_type prev_ExecutorFinish = NULL; static ExecutorEnd_hook_type prev_ExecutorEnd = NULL; static ProcessUtility_hook_type prev_ProcessUtility = NULL; static emit_log_hook_type prev_emit_log_hook = NULL; -void pgsm_emit_log_hook(ErrorData *edata); +DECLARE_HOOK(void pgsm_emit_log_hook, ErrorData *edata); static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static ExecutorCheckPerms_hook_type prev_ExecutorCheckPerms_hook = NULL; PG_FUNCTION_INFO_V1(pg_stat_monitor_version); PG_FUNCTION_INFO_V1(pg_stat_monitor_reset); -PG_FUNCTION_INFO_V1(pg_stat_monitor_1_2); -PG_FUNCTION_INFO_V1(pg_stat_monitor_1_3); PG_FUNCTION_INFO_V1(pg_stat_monitor); PG_FUNCTION_INFO_V1(pg_stat_monitor_settings); PG_FUNCTION_INFO_V1(get_histogram_timings); +PG_FUNCTION_INFO_V1(pg_stat_monitor_hook_stats); static uint pg_get_client_addr(void); static int pg_get_application_name(char* application_name); @@ -108,35 +110,35 @@ static Datum intarray_get_datum(int32 arr[], int len); #if PG_VERSION_NUM < 140000 -static void pgss_post_parse_analyze(ParseState *pstate, Query *query); +DECLARE_HOOK(void pgss_post_parse_analyze, ParseState *pstate, Query *query); #else -static void pgss_post_parse_analyze(ParseState *pstate, Query *query, 
JumbleState *jstate); +DECLARE_HOOK(void pgss_post_parse_analyze, ParseState *pstate, Query *query, JumbleState *jstate); #endif -static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags); -static void pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count, bool execute_once); -static void pgss_ExecutorFinish(QueryDesc *queryDesc); -static void pgss_ExecutorEnd(QueryDesc *queryDesc); -static bool pgss_ExecutorCheckPerms(List *rt, bool abort); +DECLARE_HOOK(void pgss_ExecutorStart, QueryDesc *queryDesc, int eflags); +DECLARE_HOOK(void pgss_ExecutorRun, QueryDesc *queryDesc, ScanDirection direction, uint64 count, bool execute_once); +DECLARE_HOOK(void pgss_ExecutorFinish, QueryDesc *queryDesc); +DECLARE_HOOK(void pgss_ExecutorEnd, QueryDesc *queryDesc); +DECLARE_HOOK(bool pgss_ExecutorCheckPerms, List *rt, bool abort); #if PG_VERSION_NUM >= 140000 -static PlannedStmt * pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); -static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, +DECLARE_HOOK(PlannedStmt * pgss_planner_hook, Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); +DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, QueryCompletion *qc); #elif PG_VERSION_NUM >= 130000 -static PlannedStmt * pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); -static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, +DECLARE_HOOK(PlannedStmt * pgss_planner_hook, Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams); +DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, 
DestReceiver *dest, QueryCompletion *qc); #else static void BufferUsageAccumDiff(BufferUsage* bufusage, BufferUsage* pgBufferUsage, BufferUsage* bufusage_start); -static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, +DECLARE_HOOK(void pgss_ProcessUtility, PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, DestReceiver *dest, @@ -147,15 +149,6 @@ static uint64 pgss_hash_string(const char *str, int len); char *unpack_sql_state(int sql_state); static void pgss_store_error(uint64 queryid, const char * query, ErrorData *edata); -static pgssQueryEntry *pgss_store_query_info(uint64 bucketid, - uint64 queryid, - uint64 dbid, - uint64 userid, - uint64 ip, - uint64 appid, - const char *query, - uint64 query_len, - pgssStoreKind kind); static void pgss_store_utility(const char *query, double total_time, @@ -221,7 +214,8 @@ static uint64 djb2_hash(unsigned char *str, size_t len); void _PG_init(void) { - int i; + int i, rc; + elog(DEBUG2, "pg_stat_monitor: %s()", __FUNCTION__); /* * In order to create our shared memory area, we have to be loaded via @@ -254,12 +248,21 @@ _PG_init(void) EmitWarningsOnPlaceholders("pg_stat_monitor"); + /* + * Compile regular expression for extracting out query comments only once. + */ + rc = regcomp(&preg_query_comments, "/\\*([^*]|[\r\n]|(\\*+([^*/]|[\r\n])))*\\*+/", REG_EXTENDED); + if (rc != 0) + { + elog(ERROR, "pg_stat_monitor: query comments regcomp() failed, return code=(%d)\n", rc); + } + /* * Request additional shared resources. (These are no-ops if we're not in * the postmaster process.) We'll allocate or attach to the shared * resources in pgss_shmem_startup(). 
*/ - RequestAddinShmemSpace(hash_memsize()); + RequestAddinShmemSpace(hash_memsize() + HOOK_STATS_SIZE); RequestNamedLWLockTranche("pg_stat_monitor", 1); /* @@ -268,24 +271,25 @@ _PG_init(void) prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = pgss_shmem_startup; prev_post_parse_analyze_hook = post_parse_analyze_hook; - post_parse_analyze_hook = pgss_post_parse_analyze; + post_parse_analyze_hook = HOOK(pgss_post_parse_analyze); prev_ExecutorStart = ExecutorStart_hook; - ExecutorStart_hook = pgss_ExecutorStart; + ExecutorStart_hook = HOOK(pgss_ExecutorStart); prev_ExecutorRun = ExecutorRun_hook; - ExecutorRun_hook = pgss_ExecutorRun; + ExecutorRun_hook = HOOK(pgss_ExecutorRun); prev_ExecutorFinish = ExecutorFinish_hook; - ExecutorFinish_hook = pgss_ExecutorFinish; + ExecutorFinish_hook = HOOK(pgss_ExecutorFinish); prev_ExecutorEnd = ExecutorEnd_hook; - ExecutorEnd_hook = pgss_ExecutorEnd; + ExecutorEnd_hook = HOOK(pgss_ExecutorEnd); prev_ProcessUtility = ProcessUtility_hook; - ProcessUtility_hook = pgss_ProcessUtility; + ProcessUtility_hook = HOOK(pgss_ProcessUtility); #if PG_VERSION_NUM >= 130000 planner_hook_next = planner_hook; - planner_hook = pgss_planner_hook; + planner_hook = HOOK(pgss_planner_hook); #endif - emit_log_hook = pgsm_emit_log_hook; + prev_emit_log_hook = emit_log_hook; + emit_log_hook = HOOK(pgsm_emit_log_hook); prev_ExecutorCheckPerms_hook = ExecutorCheckPerms_hook; - ExecutorCheckPerms_hook = pgss_ExecutorCheckPerms; + ExecutorCheckPerms_hook = HOOK(pgss_ExecutorCheckPerms); nested_queryids = (uint64*) malloc(sizeof(uint64) * max_stack_depth); @@ -307,8 +311,10 @@ _PG_fini(void) ExecutorFinish_hook = prev_ExecutorFinish; ExecutorEnd_hook = prev_ExecutorEnd; ProcessUtility_hook = prev_ProcessUtility; + emit_log_hook = prev_emit_log_hook; free(nested_queryids); + regfree(&preg_query_comments); hash_entry_reset(); } @@ -338,6 +344,16 @@ pg_stat_monitor_version(PG_FUNCTION_ARGS) } #if PG_VERSION_NUM >= 140000 +#ifdef BENCHMARK 
+static void +pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query, JumbleState *jstate) +{ + double start_time = (double)clock(); + pgss_post_parse_analyze(pstate, query, jstate); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed); +} +#endif /* * Post-parse-analysis hook: mark query with a queryId */ @@ -353,6 +369,9 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) if (!IsSystemInitialized()) return; + if (IsParallelWorker()) + return; + /* * Clear queryId for prepared statements related utility, as those will * inherit from the underlying statement's one (except DEALLOCATE which is @@ -383,6 +402,16 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate) } #else +#ifdef BENCHMARK +static void +pgss_post_parse_analyze_benchmark(ParseState *pstate, Query *query) +{ + double start_time = (double)clock(); + pgss_post_parse_analyze(pstate, query); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_POST_PARSE_ANALYZE, elapsed); +} +#endif /* * Post-parse-analysis hook: mark query with a queryId */ @@ -399,6 +428,9 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) if (!IsSystemInitialized()) return; + if (IsParallelWorker()) + return; + /* * Utility statements get queryId zero. 
We do this even in cases where * the statement contains an optimizable statement for which a queryId @@ -435,6 +467,16 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query) } #endif +#ifdef BENCHMARK +static void +pgss_ExecutorStart_benchmark(QueryDesc *queryDesc, int eflags) +{ + double start_time = (double)clock(); + pgss_ExecutorStart(queryDesc, eflags); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORSTART, elapsed); +} +#endif /* * ExecutorStart hook: start up tracking if needed */ @@ -451,6 +493,9 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) else standard_ExecutorStart(queryDesc, eflags); + if (IsParallelWorker()) + return; + /* * If query has queryId zero, don't track it. This prevents double * counting of optimizable statements that are directly contained in @@ -494,6 +539,18 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags) } } +#ifdef BENCHMARK +static void +pgss_ExecutorRun_benchmark(QueryDesc *queryDesc, ScanDirection direction, uint64 count, + bool execute_once) +{ + double start_time = (double)clock(); + pgss_ExecutorRun(queryDesc, direction, count, execute_once); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORUN, elapsed); +} +#endif + /* * ExecutorRun hook: all we need do is track nesting depth */ @@ -524,6 +581,17 @@ pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count, PG_END_TRY(); } +#ifdef BENCHMARK +static void +pgss_ExecutorFinish_benchmark(QueryDesc *queryDesc) +{ + double start_time = (double)clock(); + pgss_ExecutorFinish(queryDesc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORFINISH, elapsed); +} +#endif + /* * ExecutorFinish hook: all we need do is track nesting depth */ @@ -567,6 +635,17 @@ pgss_explain(QueryDesc *queryDesc) return es->str->data; } +#ifdef BENCHMARK +static void 
+pgss_ExecutorEnd_benchmark(QueryDesc *queryDesc) +{ + double start_time = (double)clock(); + pgss_ExecutorEnd(queryDesc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTOREND, elapsed); +} +#endif + /* * ExecutorEnd hook: store results if needed */ @@ -574,7 +653,6 @@ static void pgss_ExecutorEnd(QueryDesc *queryDesc) { uint64 queryId = queryDesc->plannedstmt->queryId; - pgssSharedState *pgss = pgsm_get_ss(); SysInfo sys_info; PlanInfo plan_info; @@ -588,7 +666,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) MemoryContextSwitchTo(mct); } - if (queryId != UINT64CONST(0) && queryDesc->totaltime) + if (queryId != UINT64CONST(0) && queryDesc->totaltime && !IsParallelWorker()) { /* * Make sure stats accumulation is done. (Note: it's okay if several @@ -622,20 +700,31 @@ pgss_ExecutorEnd(QueryDesc *queryDesc) prev_ExecutorEnd(queryDesc); else standard_ExecutorEnd(queryDesc); - pgss->num_relations = 0; + num_relations = 0; } +#ifdef BENCHMARK +static bool +pgss_ExecutorCheckPerms_benchmark(List *rt, bool abort) +{ + bool ret; + double start_time = (double)clock(); + ret = pgss_ExecutorCheckPerms(rt, abort); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_EXECUTORCHECKPERMS, elapsed); + return ret; +} +#endif + static bool pgss_ExecutorCheckPerms(List *rt, bool abort) { ListCell *lr = NULL; - pgssSharedState *pgss = pgsm_get_ss(); int i = 0; int j = 0; Oid list_oid[20]; - LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - pgss->num_relations = 0; + num_relations = 0; foreach(lr, rt) { @@ -660,14 +749,13 @@ pgss_ExecutorCheckPerms(List *rt, bool abort) namespace_name = get_namespace_name(get_rel_namespace(rte->relid)); relation_name = get_rel_name(rte->relid); if (rte->relkind == 'v') - snprintf(pgss->relations[i++], REL_LEN, "%s.%s*", namespace_name, relation_name); + snprintf(relations[i++], REL_LEN, "%s.%s*", namespace_name, relation_name); else - 
snprintf(pgss->relations[i++], REL_LEN, "%s.%s", namespace_name, relation_name); + snprintf(relations[i++], REL_LEN, "%s.%s", namespace_name, relation_name); } } } - pgss->num_relations = i; - LWLockRelease(pgss->lock); + num_relations = i; if (prev_ExecutorCheckPerms_hook) return prev_ExecutorCheckPerms_hook(rt, abort); @@ -676,12 +764,24 @@ pgss_ExecutorCheckPerms(List *rt, bool abort) } #if PG_VERSION_NUM >= 130000 +#ifdef BENCHMARK +static PlannedStmt* +pgss_planner_hook_benchmark(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) +{ + PlannedStmt *ret; + double start_time = (double)clock(); + ret = pgss_planner_hook(parse, query_string, cursorOptions, boundParams); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PLANNER_HOOK, elapsed); + return ret; +} +#endif static PlannedStmt* pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) { PlannedStmt *result; - if (PGSM_TRACK_PLANNING && query_string && parse->queryId != UINT64CONST(0)) + if (PGSM_TRACK_PLANNING && query_string && parse->queryId != UINT64CONST(0) && !IsParallelWorker()) { PlanInfo plan_info; instr_time start; @@ -769,6 +869,21 @@ pgss_planner_hook(Query *parse, const char *query_string, int cursorOptions, Par * ProcessUtility hook */ #if PG_VERSION_NUM >= 140000 +#ifdef BENCHMARK +static void +pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc) +{ + double start_time = (double)clock(); + pgss_ProcessUtility(pstmt, queryString, readOnlyTree, context, params, queryEnv, dest, qc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); +} +#endif static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool 
readOnlyTree, ProcessUtilityContext context, @@ -777,6 +892,20 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, QueryCompletion *qc) #elif PG_VERSION_NUM >= 130000 +#ifdef BENCHMARK +static void +pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc) +{ + double start_time = (double)clock(); + pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, qc); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); +} +#endif static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, @@ -784,6 +913,20 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, QueryCompletion *qc) #else +#ifdef BENCHMARK +static void +pgss_ProcessUtility_benchmark(PlannedStmt *pstmt, const char *queryString, + ProcessUtilityContext context, ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + char *completionTag) +{ + double start_time = (double)clock(); + pgss_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSS_PROCESSUTILITY, elapsed); +} +#endif static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, QueryEnvironment *queryEnv, @@ -810,7 +953,7 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString, if (PGSM_TRACK_UTILITY && !IsA(parsetree, ExecuteStmt) && !IsA(parsetree, PrepareStmt) && - !IsA(parsetree, DeallocateStmt)) + !IsA(parsetree, DeallocateStmt) && !IsParallelWorker()) { instr_time start; instr_time duration; @@ -1045,7 +1188,6 @@ 
pgss_update_entry(pgssEntry *entry, int index; char application_name[APPLICATIONNAME_LEN]; int application_name_len = pg_get_application_name(application_name); - pgssSharedState *pgss = pgsm_get_ss(); double old_mean; int message_len = error_info ? strlen (error_info->message) : 0; int comments_len = comments ? strlen (comments) : 0; @@ -1061,7 +1203,8 @@ pgss_update_entry(pgssEntry *entry, if (reset) memset(&entry->counters, 0, sizeof(Counters)); - _snprintf(e->counters.info.comments, comments, comments_len, COMMENTS_LEN); + if (comments_len > 0) + _snprintf(e->counters.info.comments, comments, comments_len + 1, COMMENTS_LEN); e->counters.state = kind; if (kind == PGSS_PLAN) { @@ -1113,11 +1256,14 @@ pgss_update_entry(pgssEntry *entry, e->counters.resp_calls[index]++; } - _snprintf(e->counters.planinfo.plan_text, plan_info->plan_text, plan_text_len, PLAN_TEXT_LEN); - _snprintf(e->counters.info.application_name, application_name, application_name_len, APPLICATIONNAME_LEN); + if (plan_text_len > 0) + _snprintf(e->counters.planinfo.plan_text, plan_info->plan_text, plan_text_len + 1, PLAN_TEXT_LEN); - e->counters.info.num_relations = pgss->num_relations; - _snprintf2(e->counters.info.relations, pgss->relations, pgss->num_relations, REL_LEN); + if (application_name_len > 0) + _snprintf(e->counters.info.application_name, application_name, application_name_len + 1, APPLICATIONNAME_LEN); + + e->counters.info.num_relations = num_relations; + _snprintf2(e->counters.info.relations, relations, num_relations, REL_LEN); e->counters.info.cmd_type = cmd_type; @@ -1170,40 +1316,6 @@ pgss_update_entry(pgssEntry *entry, } } -static pgssEntry* -pgss_get_entry(uint64 bucket_id, - uint64 userid, - uint64 dbid, - uint64 queryid, - uint64 ip, - uint64 planid, - uint64 appid) -{ - pgssEntry *entry; - pgssHashKey key; - HTAB *pgss_hash = pgsm_get_hash(); - pgssSharedState *pgss = pgsm_get_ss(); - - key.bucket_id = bucket_id; - key.userid = userid; - key.dbid = MyDatabaseId; - key.queryid 
= queryid; - key.ip = pg_get_client_addr(); - key.planid = planid; - key.appid = appid; - - entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); - if(!entry) - { - /* OK to create a new hashtable entry */ - entry = hash_entry_alloc(pgss, &key, GetDatabaseEncoding()); - if (entry == NULL) - return NULL; - } - Assert(entry); - return entry; -} - static void pgss_store_query(uint64 queryid, const char * query, @@ -1342,90 +1454,114 @@ pgss_store(uint64 queryid, JumbleState *jstate, pgssStoreKind kind) { - pgssEntry *entry; + HTAB *pgss_hash; + pgssHashKey key; + pgssEntry *entry; pgssSharedState *pgss = pgsm_get_ss(); - char application_name[APPLICATIONNAME_LEN]; - int application_name_len = pg_get_application_name(application_name); - bool reset = false; - uint64 bucketid; - uint64 userid = GetUserId(); - uint64 dbid = MyDatabaseId; - uint64 ip = pg_get_client_addr(); - uint64 planid = plan_info ? plan_info->planid: 0; - uint64 appid = djb2_hash((unsigned char *)application_name, application_name_len); - char *comments; + char application_name[APPLICATIONNAME_LEN]; + int application_name_len; + bool reset = false; + uint64 bucketid; + uint64 prev_bucket_id; + uint64 userid; + uint64 planid; + uint64 appid; + char comments[512] = ""; + size_t query_len; + /* Monitoring is disabled */ if (!PGSM_ENABLED) return; - Assert(query != NULL); - - comments = extract_query_comments(query); - /* Safety check... */ - if (!IsSystemInitialized() || !pgss_qbuf[pgss->current_wbucket]) + if (!IsSystemInitialized() || !pgss_qbuf[pg_atomic_read_u64(&pgss->current_wbucket)]) return; + Assert(query != NULL); + userid = GetUserId(); + + application_name_len = pg_get_application_name(application_name); + planid = plan_info ? 
plan_info->planid: 0; + appid = djb2_hash((unsigned char *)application_name, application_name_len); + + extract_query_comments(query, comments, sizeof(comments)); + + prev_bucket_id = pg_atomic_read_u64(&pgss->current_wbucket); bucketid = get_next_wbucket(pgss); - if (bucketid != pgss->current_wbucket) - { + + if (bucketid != prev_bucket_id) reset = true; - pgss->current_wbucket = bucketid; - } - LWLockAcquire(pgss->lock, LW_EXCLUSIVE); + key.bucket_id = bucketid; + key.userid = userid; + key.dbid = MyDatabaseId; + key.queryid = queryid; + key.ip = pg_get_client_addr(); + key.planid = planid; + key.appid = appid; - switch (kind) + pgss_hash = pgsm_get_hash(); + + LWLockAcquire(pgss->lock, LW_SHARED); + + entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL); + if (!entry) { - case PGSS_PARSE: - case PGSS_PLAN: - { - pgssQueryEntry *query_entry; - query_entry = pgss_store_query_info(bucketid, queryid, dbid, userid, ip, appid, query, strlen(query), kind); - if (query_entry == NULL) - elog(DEBUG1, "pg_stat_monitor: out of memory"); - break; - } - case PGSS_ERROR: - case PGSS_EXEC: - case PGSS_FINISHED: - { - pgssQueryEntry *query_entry; - query_entry = pgss_store_query_info(bucketid, queryid, dbid, userid, ip, appid, query, strlen(query), kind); - if (query_entry == NULL) - { - elog(DEBUG1, "pg_stat_monitor: out of memory"); - break; - } - entry = pgss_get_entry(bucketid, userid, dbid, queryid, ip, planid, appid); - if (entry == NULL) - { - elog(DEBUG1, "pg_stat_monitor: out of memory"); - break; - } + uint64 prev_qbuf_len; + /* position in which the query's text was inserted into the query buffer. 
*/ + size_t qpos = 0; - if (jstate == NULL) - pgss_update_entry(entry, /* entry */ - bucketid, /* bucketid */ - queryid, /* queryid */ - query, /* query */ - comments, /* comments */ - plan_info, /* PlanInfo */ - cmd_type, /* CmdType */ - sys_info, /* SysInfo */ - error_info, /* ErrorInfo */ - total_time, /* total_time */ - rows, /* rows */ - bufusage, /* bufusage */ - walusage, /* walusage */ - reset, /* reset */ - kind); /* kind */ + query_len = strlen(query); + if (query_len > PGSM_QUERY_MAX_LEN) + query_len = PGSM_QUERY_MAX_LEN; + + /* Need exclusive lock to make a new hashtable entry - promote */ + LWLockRelease(pgss->lock); + LWLockAcquire(pgss->lock, LW_EXCLUSIVE); + + /* + * Save current query buffer length, if we fail to add a new + * new entry to the hash table then we must restore the + * original length. + */ + memcpy(&prev_qbuf_len, pgss_qbuf[bucketid], sizeof(prev_qbuf_len)); + if (!SaveQueryText(bucketid, queryid, pgss_qbuf[bucketid], query, query_len, &qpos)) + { + LWLockRelease(pgss->lock); + elog(DEBUG1, "pg_stat_monitor: insufficient shared space for query."); + return; } - break; - case PGSS_NUMKIND: - case PGSS_INVALID: - break; + + /* OK to create a new hashtable entry */ + entry = hash_entry_alloc(pgss, &key, GetDatabaseEncoding()); + if (entry == NULL) + { + /* Restore previous query buffer length. 
*/ + memcpy(pgss_qbuf[bucketid], &prev_qbuf_len, sizeof(prev_qbuf_len)); + LWLockRelease(pgss->lock); + elog(DEBUG1, "pg_stat_monitor: out of memory"); + return; + } + entry->query_pos = qpos; } + + if (jstate == NULL) + pgss_update_entry(entry, /* entry */ + bucketid, /* bucketid */ + queryid, /* queryid */ + query, /* query */ + comments, /* comments */ + plan_info, /* PlanInfo */ + cmd_type, /* CmdType */ + sys_info, /* SysInfo */ + error_info, /* ErrorInfo */ + total_time, /* total_time */ + rows, /* rows */ + bufusage, /* bufusage */ + walusage, /* walusage */ + reset, /* reset */ + kind); /* kind */ + LWLockRelease(pgss->lock); } /* @@ -1441,8 +1577,20 @@ pg_stat_monitor_reset(PG_FUNCTION_ARGS) (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - hash_entry_dealloc(-1); - hash_query_entryies_reset(); + hash_entry_dealloc(-1, -1, NULL); + /* Reset query buffers. */ + for (size_t i = 0; i < MAX_BUCKETS; ++i) + { + *(uint64 *)pgss_qbuf[i] = 0; + } +#ifdef BENCHMARK + for (int i = STATS_START; i < STATS_END; ++i) { + pg_hook_stats[i].min_time = 0; + pg_hook_stats[i].max_time = 0; + pg_hook_stats[i].total_time = 0; + pg_hook_stats[i].ncalls = 0; + } +#endif LWLockRelease(pgss->lock); PG_RETURN_VOID(); } @@ -1485,11 +1633,11 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, MemoryContext oldcontext; HASH_SEQ_STATUS hash_seq; pgssEntry *entry; - pgssQueryEntry *query_entry; char parentid_txt[32]; pgssSharedState *pgss = pgsm_get_ss(); HTAB *pgss_hash = pgsm_get_hash(); - char *query_txt = (char*) malloc(PGSM_QUERY_MAX_LEN); + char *query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); + char *parent_query_txt = (char*) palloc0(PGSM_QUERY_MAX_LEN); /* Safety check... 
*/ if (!IsSystemInitialized()) @@ -1516,7 +1664,7 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) elog(ERROR, "pg_stat_monitor: return type must be a row type"); - if (tupdesc->natts != 49) + if (tupdesc->natts != 50) elog(ERROR, "pg_stat_monitor: incorrect number of output arguments, required %d", tupdesc->natts); tupstore = tuplestore_begin_heap(true, false, work_mem); @@ -1544,19 +1692,14 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, uint64 userid = entry->key.userid; uint64 ip = entry->key.ip; uint64 planid = entry->key.planid; - uint64 appid = entry->key.appid; unsigned char *buf = pgss_qbuf[bucketid]; - char *query_txt = (char*) malloc(PGSM_QUERY_MAX_LEN); #if PG_VERSION_NUM < 140000 bool is_allowed_role = is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS); #else bool is_allowed_role = is_member_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS); #endif - query_entry = hash_find_query_entry(bucketid, queryid, dbid, userid, ip, appid); - if (query_entry == NULL) - continue; - if (read_query(buf, bucketid, queryid, query_txt) == 0) + if (read_query(buf, queryid, query_txt, entry->query_pos) == 0) { int len; len = read_query_buffer(bucketid, queryid, query_txt); @@ -1576,6 +1719,21 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, if (tmp.state == PGSS_FINISHED) continue; } + + /* Skip queries such as, $1, $2 := $3, etc. 
*/ + if (tmp.state == PGSS_PARSE || tmp.state == PGSS_PLAN) + continue; + + if (tmp.info.parentid != UINT64CONST(0)) + { + int len = 0; + if (read_query(buf, tmp.info.parentid, parent_query_txt, 0) == 0) + { + len = read_query_buffer(bucketid, tmp.info.parentid, parent_query_txt); + if (len != MAX_QUERY_BUFFER_BUCKET) + snprintf(parent_query_txt, 32, "%s", ""); + } + } /* bucketid at column number 0 */ values[i++] = Int64GetDatumFast(bucketid); @@ -1650,10 +1808,12 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, { snprintf(parentid_txt, 32, "%08lX",tmp.info.parentid); values[i++] = CStringGetTextDatum(parentid_txt); + values[i++] = CStringGetTextDatum(parent_query_txt); } else { nulls[i++] = true; + nulls[i++] = true; } /* application_name at column number 9 */ @@ -1722,23 +1882,23 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = Int64GetDatumFast(tmp.calls.calls); /* total_time at column number 17 */ - values[i++] = Float8GetDatumFast(tmp.time.total_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.total_time, 4)); /* min_time at column number 18 */ - values[i++] = Float8GetDatumFast(tmp.time.min_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.min_time,4)); /* max_time at column number 19 */ - values[i++] = Float8GetDatumFast(tmp.time.max_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.max_time,4)); /* mean_time at column number 20 */ - values[i++] = Float8GetDatumFast(tmp.time.mean_time); + values[i++] = Float8GetDatumFast(roundf(tmp.time.mean_time,4)); if (tmp.calls.calls > 1) stddev = sqrt(tmp.time.sum_var_time / tmp.calls.calls); else stddev = 0.0; /* calls at column number 21 */ - values[i++] = Float8GetDatumFast(stddev); + values[i++] = Float8GetDatumFast(roundf(stddev,4)); /* calls at column number 22 */ values[i++] = Int64GetDatumFast(tmp.calls.rows); @@ -1754,23 +1914,23 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = Int64GetDatumFast(tmp.plancalls.calls); /* total_time at column 
number 24 */ - values[i++] = Float8GetDatumFast(tmp.plantime.total_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.total_time,4)); /* min_time at column number 25 */ - values[i++] = Float8GetDatumFast(tmp.plantime.min_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.min_time,4)); /* max_time at column number 26 */ - values[i++] = Float8GetDatumFast(tmp.plantime.max_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.max_time,4)); /* mean_time at column number 27 */ - values[i++] = Float8GetDatumFast(tmp.plantime.mean_time); + values[i++] = Float8GetDatumFast(roundf(tmp.plantime.mean_time,4)); if (tmp.plancalls.calls > 1) stddev = sqrt(tmp.plantime.sum_var_time / tmp.plancalls.calls); else stddev = 0.0; /* calls at column number 28 */ - values[i++] = Float8GetDatumFast(stddev); + values[i++] = Float8GetDatumFast(roundf(stddev,4)); /* blocks are from column number 29 - 40 */ values[i++] = Int64GetDatumFast(tmp.blocks.shared_blks_hit); @@ -1790,10 +1950,10 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, values[i++] = IntArrayGetTextDatum(tmp.resp_calls, MAX_RESPONSE_BUCKET); /* utime at column number 42 */ - values[i++] = Float8GetDatumFast(tmp.sysinfo.utime); + values[i++] = Float8GetDatumFast(roundf(tmp.sysinfo.utime,4)); /* stime at column number 43 */ - values[i++] = Float8GetDatumFast(tmp.sysinfo.stime); + values[i++] = Float8GetDatumFast(roundf(tmp.sysinfo.stime,4)); { char buf[256]; Datum wal_bytes; @@ -1822,7 +1982,8 @@ pg_stat_monitor_internal(FunctionCallInfo fcinfo, } tuplestore_putvalues(tupstore, tupdesc, values, nulls); } - free(query_txt); + pfree(query_txt); + pfree(parent_query_txt); /* clean up and return the tuplestore */ LWLockRelease(pgss->lock); @@ -1833,40 +1994,73 @@ static uint64 get_next_wbucket(pgssSharedState *pgss) { struct timeval tv; - uint64 current_usec; - uint64 bucket_id; - struct tm *lt; + uint64 current_usec; + uint64 current_bucket_usec; + uint64 new_bucket_id; + uint64 prev_bucket_id; + 
struct tm *lt; + bool update_bucket = false; gettimeofday(&tv,NULL); current_usec = (TimestampTz) tv.tv_sec - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY); current_usec = (current_usec * USECS_PER_SEC) + tv.tv_usec; + current_bucket_usec = pg_atomic_read_u64(&pgss->prev_bucket_usec); - if ((current_usec - pgss->prev_bucket_usec) > (PGSM_BUCKET_TIME * 1000 * 1000)) + /* + * If current bucket expired we loop attempting to update prev_bucket_usec. + * + * pg_atomic_compare_exchange_u64 may fail in two possible ways: + * 1. Another thread/process updated the variable before us. + * 2. A spurious failure / hardware event. + * + * In both failure cases we read prev_bucket_usec from memory again, if it was + * a spurious failure then the value of prev_bucket_usec must be the same as + * before, which will cause the while loop to execute again. + * + * If another thread updated prev_bucket_usec, then its current value will + * definitely make the while condition to fail, we can stop the loop as another + * thread has already updated prev_bucket_usec. + */ + while ((current_usec - current_bucket_usec) > (PGSM_BUCKET_TIME * 1000 * 1000)) + { + if (pg_atomic_compare_exchange_u64(&pgss->prev_bucket_usec, ¤t_bucket_usec, current_usec)) + { + update_bucket = true; + break; + } + + current_bucket_usec = pg_atomic_read_u64(&pgss->prev_bucket_usec); + } + + if (update_bucket) { - unsigned char *buf; char file_name[1024]; int sec = 0; - bucket_id = (tv.tv_sec / PGSM_BUCKET_TIME) % PGSM_MAX_BUCKETS; - LWLockAcquire(pgss->lock, LW_EXCLUSIVE); - buf = pgss_qbuf[bucket_id]; - hash_entry_dealloc(bucket_id); - hash_query_entry_dealloc(bucket_id, buf); + new_bucket_id = (tv.tv_sec / PGSM_BUCKET_TIME) % PGSM_MAX_BUCKETS; - snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)bucket_id); + /* Update bucket id and retrieve the previous one. 
*/ + prev_bucket_id = pg_atomic_exchange_u64(&pgss->current_wbucket, new_bucket_id); + + LWLockAcquire(pgss->lock, LW_EXCLUSIVE); + hash_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf); + + snprintf(file_name, 1024, "%s.%d", PGSM_TEXT_FILE, (int)new_bucket_id); unlink(file_name); LWLockRelease(pgss->lock); - pgss->prev_bucket_usec = current_usec; + lt = localtime(&tv.tv_sec); sec = lt->tm_sec - (lt->tm_sec % PGSM_BUCKET_TIME); if (sec < 0) sec = 0; - snprintf(pgss->bucket_start_time[bucket_id], sizeof(pgss->bucket_start_time[bucket_id]), + snprintf(pgss->bucket_start_time[new_bucket_id], sizeof(pgss->bucket_start_time[new_bucket_id]), "%04d-%02d-%02d %02d:%02d:%02d", lt->tm_year + 1900, lt->tm_mon + 1, lt->tm_mday, lt->tm_hour, lt->tm_min, sec); - return bucket_id; + + return new_bucket_id; } - return pgss->current_wbucket; + + return pg_atomic_read_u64(&pgss->current_wbucket); } #if PG_VERSION_NUM < 140000 @@ -2805,7 +2999,7 @@ intarray_get_datum(int32 arr[], int len) } uint64 -read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char * query) +read_query(unsigned char *buf, uint64 queryid, char * query, size_t pos) { bool found = false; uint64 query_id = 0; @@ -2817,6 +3011,27 @@ read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char * query) if (buf_len <= 0) goto exit; + /* If a position hint is given, try to locate the query directly. */ + if (pos != 0 && (pos + sizeof(uint64) + sizeof(uint64)) < buf_len) + { + memcpy(&query_id, &buf[pos], sizeof(uint64)); + if (query_id != queryid) + return 0; + + pos += sizeof(uint64); + + memcpy(&query_len, &buf[pos], sizeof(uint64)); /* query len */ + pos += sizeof(uint64); + + if (pos + query_len > buf_len) /* avoid reading past buffer's length. 
*/ + return 0; + + memcpy(query, &buf[pos], query_len); /* Actual query */ + query[query_len] = '\0'; + + return queryid; + } + rlen = sizeof (uint64); /* Move forwad to skip length bytes */ for(;;) { @@ -2855,44 +3070,13 @@ exit: return 0; } -static pgssQueryEntry* -pgss_store_query_info(uint64 bucketid, - uint64 queryid, - uint64 dbid, - uint64 userid, - uint64 ip, - uint64 appid, - const char *query, - uint64 query_len, - pgssStoreKind kind) -{ - pgssSharedState *pgss = pgsm_get_ss(); - unsigned char *buf = pgss_qbuf[pgss->current_wbucket]; - pgssQueryEntry *entry; - - if (query_len > PGSM_QUERY_MAX_LEN) - query_len = PGSM_QUERY_MAX_LEN; - - /* Already have query in the shared buffer, there - * is no need to add that again. - */ - entry = hash_find_query_entry(bucketid, queryid, dbid, userid, ip, appid); - if (entry) - return entry; - - entry = hash_create_query_entry(bucketid, queryid, dbid, userid, ip, appid); - if (!entry) - return NULL; - entry->state = kind; - - if(!SaveQueryText(bucketid, queryid, buf, query, query_len)) - return NULL; - - return entry; -} - bool -SaveQueryText(uint64 bucketid, uint64 queryid, unsigned char *buf, const char *query, uint64 query_len) +SaveQueryText(uint64 bucketid, + uint64 queryid, + unsigned char *buf, + const char *query, + uint64 query_len, + size_t *query_pos) { uint64 buf_len = 0; @@ -2917,6 +3101,8 @@ SaveQueryText(uint64 bucketid, uint64 queryid, unsigned char *buf, const char *q } } + *query_pos = buf_len; + memcpy(&buf[buf_len], &queryid, sizeof (uint64)); /* query id */ buf_len += sizeof (uint64); @@ -2992,18 +3178,93 @@ pg_stat_monitor_settings(PG_FUNCTION_ARGS) return (Datum)0; } +Datum +pg_stat_monitor_hook_stats(PG_FUNCTION_ARGS) +{ +#ifdef BENCHMARK + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate *tupstore; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + enum pg_hook_stats_id hook_id; + + /* Safety check... 
*/ + if (!IsSystemInitialized()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("pg_stat_monitor: must be loaded via shared_preload_libraries"))); + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("pg_stat_monitor: set-valued function called in context that cannot accept a set"))); + + /* Switch into long-lived context to construct returned data structures */ + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "pg_stat_monitor: return type must be a row type"); + + if (tupdesc->natts != 5) + elog(ERROR, "pg_stat_monitor: incorrect number of output arguments, required %d", tupdesc->natts); + + tupstore = tuplestore_begin_heap(true, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + MemoryContextSwitchTo(oldcontext); + + for (hook_id = 0; hook_id < STATS_END; hook_id++) + { + Datum values[5]; + bool nulls[5]; + int j = 0; + memset(values, 0, sizeof(values)); + memset(nulls, 0, sizeof(nulls)); + + values[j++] = CStringGetTextDatum(pg_hook_stats[hook_id].hook_name); + values[j++] = Float8GetDatumFast(pg_hook_stats[hook_id].min_time); + values[j++] = Float8GetDatumFast(pg_hook_stats[hook_id].max_time); + values[j++] = Float8GetDatumFast(pg_hook_stats[hook_id].total_time); + values[j++] = Int64GetDatumFast(pg_hook_stats[hook_id].ncalls); + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + /* clean up and return the tuplestore */ + tuplestore_donestoring(tupstore); +#endif /* #ifdef BENCHMARK */ + return (Datum)0; +} + void set_qbuf(int i, unsigned char *buf) { pgss_qbuf[i] = buf; } +#ifdef BENCHMARK +static void 
+pgsm_emit_log_hook_benchmark(ErrorData *edata) +{ + double start_time = (double)clock(); + pgsm_emit_log_hook(edata); + double elapsed = ((double)clock() - start_time) / CLOCKS_PER_SEC; + update_hook_stats(STATS_PGSM_EMIT_LOG_HOOK, elapsed); +} +#endif void pgsm_emit_log_hook(ErrorData *edata) { if (!IsSystemInitialized() || edata == NULL) goto exit; + if (IsParallelWorker()) + return; + if ((edata->elevel == ERROR || edata->elevel == WARNING || edata->elevel == INFO || edata->elevel == DEBUG1)) { uint64 queryid = 0; @@ -3079,7 +3340,7 @@ read_query_buffer(int bucket_id, uint64 queryid, char *query_txt) break; } off += buf_len; - if (read_query(buf, bucket_id, queryid, query_txt)) + if (read_query(buf, queryid, query_txt, 0)) break; } if (fd > 0) @@ -3194,29 +3455,45 @@ get_histogram_timings(PG_FUNCTION_ARGS) return CStringGetTextDatum(text_str); } -char * -extract_query_comments(const char *query) +static void +extract_query_comments(const char *query, char *comments, size_t max_len) { - regex_t preg; - char *pattern = "/\\*.*\\*/"; int rc; size_t nmatch = 1; regmatch_t pmatch; - char *comments = palloc0(512); + regoff_t comment_len, total_len = 0; + const char *s = query; - rc = regcomp(&preg, pattern, 0); - if (rc != 0) - { - printf("regcomp() failed, returning nonzero (%d)\n", rc); - return ""; - } - rc = regexec(&preg, query, nmatch, &pmatch, 0); - if (rc != 0) - return ""; - sprintf(comments, "%.*s", pmatch.rm_eo - pmatch.rm_so -4, &query[pmatch.rm_so + 2]); - regfree(&preg); - return comments; + while (total_len < max_len) + { + rc = regexec(&preg_query_comments, s, nmatch, &pmatch, 0); + if (rc != 0) + break; + + comment_len = pmatch.rm_eo - pmatch.rm_so; + + if (total_len + comment_len > max_len) + break; /* TODO: log error in error view, insufficient space for comment. */ + + total_len += comment_len; + + /* Not 1st iteration, append ", " before next comment. 
*/ + if (s != query) + { + if (total_len + 2 > max_len) + break; /* TODO: log error in error view, insufficient space for ", " + comment. */ + + memcpy(comments, ", ", 2); + comments += 2; + total_len += 2; + } + + memcpy(comments, s + pmatch.rm_so, comment_len); + comments += comment_len; + s += pmatch.rm_eo; + } } + #if PG_VERSION_NUM < 140000 static uint64 get_query_id(JumbleState *jstate, Query *query) @@ -3247,3 +3524,45 @@ static uint64 djb2_hash(unsigned char *str, size_t len) return hash; } + +#ifdef BENCHMARK +void init_hook_stats(void) +{ + bool found = false; + pg_hook_stats = ShmemInitStruct("pg_stat_monitor_hook_stats", HOOK_STATS_SIZE, &found); + if (!found) + { + memset(pg_hook_stats, 0, HOOK_STATS_SIZE); + +#define SET_HOOK_NAME(hook, name) \ + snprintf(pg_hook_stats[hook].hook_name, sizeof(pg_hook_stats->hook_name), name); + + SET_HOOK_NAME(STATS_PGSS_POST_PARSE_ANALYZE, "pgss_post_parse_analyze"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORSTART, "pgss_ExecutorStart"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORUN, "pgss_ExecutorRun"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORFINISH, "pgss_ExecutorFinish"); + SET_HOOK_NAME(STATS_PGSS_EXECUTOREND, "pgss_ExecutorEnd"); + SET_HOOK_NAME(STATS_PGSS_PROCESSUTILITY, "pgss_ProcessUtility"); +#if PG_VERSION_NUM >= 130000 + SET_HOOK_NAME(STATS_PGSS_PLANNER_HOOK, "pgss_planner_hook"); +#endif + SET_HOOK_NAME(STATS_PGSM_EMIT_LOG_HOOK, "pgsm_emit_log_hook"); + SET_HOOK_NAME(STATS_PGSS_EXECUTORCHECKPERMS, "pgss_ExecutorCheckPerms"); + } +} + +void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed) +{ + Assert(hook_id > STATS_START && hook_id < STATS_END); + + struct pg_hook_stats_t *p = &pg_hook_stats[hook_id]; + if (time_elapsed < p->min_time) + p->min_time = time_elapsed; + + if (time_elapsed > p->max_time) + p->max_time = time_elapsed; + + p->total_time += time_elapsed; + p->ncalls++; +} +#endif diff --git a/pg_stat_monitor.control b/pg_stat_monitor.control index cc93476..2b826e9 100644 --- 
a/pg_stat_monitor.control +++ b/pg_stat_monitor.control @@ -1,5 +1,5 @@ # pg_stat_monitor extension -comment = 'track execution statistics of all SQL statements executed' +comment = 'The pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL contrib module pg_stat_statements. pg_stat_monitor provides aggregated statistics, client information, plan details including plan, and histogram information.' default_version = '1.0' module_pathname = '$libdir/pg_stat_monitor' relocatable = true diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h index 3717849..2ddfe01 100644 --- a/pg_stat_monitor.h +++ b/pg_stat_monitor.h @@ -294,6 +294,7 @@ typedef struct pgssEntry Counters counters; /* the statistics for this query */ int encoding; /* query text encoding */ slock_t mutex; /* protects the counters only */ + size_t query_pos; /* query location within query buffer */ } pgssEntry; /* @@ -301,18 +302,16 @@ typedef struct pgssEntry */ typedef struct pgssSharedState { - LWLock *lock; /* protects hashtable search/modification */ - double cur_median_usage; /* current median usage in hashtable */ - slock_t mutex; /* protects following fields only: */ - Size extent; /* current extent of query file */ - int64 n_writers; /* number of active writers to query file */ - uint64 current_wbucket; - uint64 prev_bucket_usec; - uint64 bucket_entry[MAX_BUCKETS]; - int64 query_buf_size_bucket; - char relations[REL_LST][REL_LEN]; - int num_relations; /* Number of relation in the query */ - char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ + LWLock *lock; /* protects hashtable search/modification */ + double cur_median_usage; /* current median usage in hashtable */ + slock_t mutex; /* protects following fields only: */ + Size extent; /* current extent of query file */ + int64 n_writers; /* number of active writers to query file */ + pg_atomic_uint64 current_wbucket; + pg_atomic_uint64 prev_bucket_usec; + uint64 bucket_entry[MAX_BUCKETS]; + int64 
query_buf_size_bucket; + char bucket_start_time[MAX_BUCKETS][60]; /* start time of the bucket */ } pgssSharedState; #define ResetSharedState(x) \ @@ -320,8 +319,8 @@ do { \ x->cur_median_usage = ASSUMED_MEDIAN_INIT; \ x->cur_median_usage = ASSUMED_MEDIAN_INIT; \ x->n_writers = 0; \ - x->current_wbucket = 0; \ - x->prev_bucket_usec = 0; \ + pg_atomic_init_u64(&x->current_wbucket, 0); \ + pg_atomic_init_u64(&x->prev_bucket_usec, 0); \ memset(&x->bucket_entry, 0, MAX_BUCKETS * sizeof(uint64)); \ } while(0) @@ -363,7 +362,12 @@ typedef struct JumbleState /* Links to shared memory state */ -bool SaveQueryText(uint64 bucketid, uint64 queryid, unsigned char *buf, const char *query, uint64 query_len); +bool SaveQueryText(uint64 bucketid, + uint64 queryid, + unsigned char *buf, + const char *query, + uint64 query_len, + size_t *query_pos); /* guc.c */ void init_guc(void); @@ -376,20 +380,18 @@ void pgss_shmem_shutdown(int code, Datum arg); int pgsm_get_bucket_size(void); pgssSharedState* pgsm_get_ss(void); HTAB *pgsm_get_plan_hash(void); -HTAB *pgsm_get_query_hash(void); HTAB *pgsm_get_hash(void); HTAB *pgsm_get_plan_hash(void); -HTAB* pgsm_get_query_hash(void); void hash_entry_reset(void); void hash_query_entryies_reset(void); void hash_query_entries(); -void hash_query_entry_dealloc(int bucket, unsigned char *buf); -bool hash_entry_dealloc(int bucket); +void hash_query_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); +void hash_entry_dealloc(int new_bucket_id, int old_bucket_id, unsigned char *query_buffer[]); pgssEntry* hash_entry_alloc(pgssSharedState *pgss, pgssHashKey *key, int encoding); Size hash_memsize(void); int read_query_buffer(int bucket_id, uint64 queryid, char *query_txt); -uint64 read_query(unsigned char *buf, uint64 bucketid, uint64 queryid, char * query); +uint64 read_query(unsigned char *buf, uint64 queryid, char * query, size_t pos); pgssQueryEntry* hash_find_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, 
uint64 userid, uint64 ip, uint64 appid); pgssQueryEntry* hash_create_query_entry(uint64 bucket_id, uint64 queryid, uint64 dbid, uint64 userid, uint64 ip, uint64 appid); void pgss_startup(void); @@ -413,4 +415,76 @@ void pgss_startup(void); #define PGSM_QUERY_PLAN get_conf(12)->guc_variable #define PGSM_TRACK_PLANNING get_conf(13)->guc_variable +/*---- Benchmarking ----*/ +#ifdef BENCHMARK +/* + * These enumerator values are used as index in the hook stats array. + * STATS_START and STATS_END are used only to delimit the range. + * STATS_END is also the length of the valid items in the enum. + */ +enum pg_hook_stats_id { + STATS_START = -1, + STATS_PGSS_POST_PARSE_ANALYZE, + STATS_PGSS_EXECUTORSTART, + STATS_PGSS_EXECUTORUN, + STATS_PGSS_EXECUTORFINISH, + STATS_PGSS_EXECUTOREND, + STATS_PGSS_PROCESSUTILITY, +#if PG_VERSION_NUM >= 130000 + STATS_PGSS_PLANNER_HOOK, +#endif + STATS_PGSM_EMIT_LOG_HOOK, + STATS_PGSS_EXECUTORCHECKPERMS, + STATS_END +}; + +/* Hold time to execute statistics for a hook. */ +struct pg_hook_stats_t { + char hook_name[64]; + double min_time; + double max_time; + double total_time; + uint64 ncalls; +}; + +#define HOOK_STATS_SIZE MAXALIGN((size_t)STATS_END * sizeof(struct pg_hook_stats_t)) + +/* Allocate a pg_hook_stats_t array of size HOOK_STATS_SIZE on shared memory. */ +void init_hook_stats(void); + +/* Update hook time execution statistics. */ +void update_hook_stats(enum pg_hook_stats_id hook_id, double time_elapsed); + +/* + * Macro used to declare a hook function: + * Example: + * DECLARE_HOOK(void my_hook, const char *query, size_t length); + * Will expand to: + * static void my_hook(const char *query, size_t length); + * static void my_hook_benchmark(const char *query, size_t length); + */ +#define DECLARE_HOOK(hook, ...) \ + static hook(__VA_ARGS__); \ + static hook##_benchmark(__VA_ARGS__); + +/* + * Macro used to wrap a hook when pg_stat_monitor is compiled with -DBENCHMARK. 
+ * + * It is intended to be used as follows in _PG_init(): + * pg_hook_function = HOOK(my_hook_function); + * Then, if pg_stat_monitor is compiled with -DBENCHMARK this will expand to: + * pg_hook_name = my_hook_function_benchmark; + * Otherwise it will simple expand to: + * pg_hook_name = my_hook_function; + */ +#define HOOK(name) name##_benchmark + +#else /* #ifdef BENCHMARK */ + +#define DECLARE_HOOK(hook, ...) \ + static hook(__VA_ARGS__); +#define HOOK(name) name +#define HOOK_STATS_SIZE 0 +#endif + #endif diff --git a/regression/expected/counters.out b/regression/expected/counters.out index c96775e..ebf26f7 100644 --- a/regression/expected/counters.out +++ b/regression/expected/counters.out @@ -68,7 +68,7 @@ end $$; SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; query | calls ---------------------------------------------------------------------------------------------------+------- - SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; | 1000 + SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 1000 SELECT pg_stat_monitor_reset(); | 1 SELECT query,calls FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 1 do $$ +| 1 diff --git a/regression/expected/error.out b/regression/expected/error.out index 1c278c4..ccf7968 100644 --- a/regression/expected/error.out +++ b/regression/expected/error.out @@ -20,18 +20,18 @@ BEGIN RAISE WARNING 'warning message'; END $$; WARNING: warning message -SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | elevel | sqlcode | message ------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 20 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - 
SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 20 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 20 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 20 | 22012 | division by zero + SELECT pg_stat_monitor_reset(); | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/error_1.out b/regression/expected/error_1.out index 7a3f002..102a92a 100644 --- a/regression/expected/error_1.out +++ b/regression/expected/error_1.out @@ -20,18 +20,18 @@ BEGIN RAISE WARNING 'warning message'; END $$; WARNING: warning message -SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | elevel | sqlcode | message ------------------------------------------------------------------------------------------+--------+---------+----------------------------------- - ELECET * FROM unknown; | 21 | 42601 | syntax error at or near "ELECET" - SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist - SELECT 1/0; | 21 | 22012 | division by zero - SELECT pg_stat_monitor_reset(); | 0 | | - SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 0 | | - do $$ +| 19 | 01000 | warning message - BEGIN +| | | - RAISE WARNING 'warning message'; +| | | - END $$; | | | +SELECT query, 
elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; + query | elevel | sqlcode | message +------------------------------------------------------------------------------------------------+--------+---------+----------------------------------- + ELECET * FROM unknown; | 21 | 42601 | syntax error at or near "ELECET" + SELECT * FROM unknown; | 21 | 42P01 | relation "unknown" does not exist + SELECT 1/0; | 21 | 22012 | division by zero + SELECT pg_stat_monitor_reset(); | 0 | | + SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; | 0 | | + do $$ +| 19 | 01000 | warning message + BEGIN +| | | + RAISE WARNING 'warning message'; +| | | + END $$; | | | (6 rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/guc.out b/regression/expected/guc.out index 9fa592c..934c719 100644 --- a/regression/expected/guc.out +++ b/regression/expected/guc.out @@ -14,7 +14,7 @@ select pg_sleep(.5); SELECT * FROM pg_stat_monitor_settings ORDER BY name COLLATE "C"; name | value | default_value | description | minimum | maximum | restart ------------------------------------------+--------+---------------+----------------------------------------------------------------------------------------------------------+---------+------------+--------- - pg_stat_monitor.pgsm_bucket_time | 300 | 300 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 + pg_stat_monitor.pgsm_bucket_time | 60 | 60 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 pg_stat_monitor.pgsm_enable | 1 | 1 | Enable/Disable statistics collector. 
| 0 | 0 | 0 pg_stat_monitor.pgsm_enable_query_plan | 0 | 0 | Enable/Disable query plan monitoring | 0 | 0 | 0 pg_stat_monitor.pgsm_histogram_buckets | 10 | 10 | Sets the maximum number of histogram buckets | 2 | 2147483647 | 1 diff --git a/regression/expected/guc_1.out b/regression/expected/guc_1.out index 6abf062..1425109 100644 --- a/regression/expected/guc_1.out +++ b/regression/expected/guc_1.out @@ -14,7 +14,7 @@ select pg_sleep(.5); SELECT * FROM pg_stat_monitor_settings ORDER BY name COLLATE "C"; name | value | default_value | description | minimum | maximum | restart ------------------------------------------+--------+---------------+----------------------------------------------------------------------------------------------------------+---------+------------+--------- - pg_stat_monitor.pgsm_bucket_time | 300 | 300 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 + pg_stat_monitor.pgsm_bucket_time | 60 | 60 | Sets the time in seconds per bucket. | 1 | 2147483647 | 1 pg_stat_monitor.pgsm_enable | 1 | 1 | Enable/Disable statistics collector. 
| 0 | 0 | 0 pg_stat_monitor.pgsm_enable_query_plan | 0 | 0 | Enable/Disable query plan monitoring | 0 | 0 | 0 pg_stat_monitor.pgsm_histogram_buckets | 10 | 10 | Sets the maximum number of histogram buckets | 2 | 2147483647 | 1 diff --git a/regression/expected/state.out b/regression/expected/state.out index 147054a..b051091 100644 --- a/regression/expected/state.out +++ b/regression/expected/state.out @@ -16,7 +16,7 @@ ERROR: division by zero SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C"; query | state_code | state ----------------------------------------------------------------------------------+------------+--------------------- - SELECT $1 AS num | 3 | FINISHED + SELECT $1 | 3 | FINISHED SELECT 1/0; | 4 | FINISHED WITH ERROR SELECT pg_stat_monitor_reset(); | 3 | FINISHED SELECT query, state_code, state FROM pg_stat_monitor ORDER BY query COLLATE "C"; | 2 | ACTIVE diff --git a/regression/expected/tags.out b/regression/expected/tags.out index 9da2b13..b2cda90 100644 --- a/regression/expected/tags.out +++ b/regression/expected/tags.out @@ -12,11 +12,11 @@ SELECT 1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */; (1 row) SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; - query | comments --------------------------------------------------------------------------+------------------------------------------------------ - SELECT $1 AS num | { "application", psql_app, "real_ip", 192.168.1.3) - SELECT pg_stat_monitor_reset(); | - SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; | + query | comments +---------------------------------------------------------------------------+---------------------------------------------------------- + SELECT $1 AS num /* { "application", psql_app, "real_ip", 192.168.1.3) */ | /* { "application", psql_app, "real_ip", 192.168.1.3) */ + SELECT pg_stat_monitor_reset(); | + SELECT query, comments FROM pg_stat_monitor ORDER BY query COLLATE "C"; | (3 
rows) SELECT pg_stat_monitor_reset(); diff --git a/regression/expected/version.out b/regression/expected/version.out index b9920ea..77ff028 100644 --- a/regression/expected/version.out +++ b/regression/expected/version.out @@ -2,7 +2,7 @@ CREATE EXTENSION pg_stat_monitor; SELECT pg_stat_monitor_version(); pg_stat_monitor_version ------------------------- - devel + 1.0.0-beta-2 (1 row) DROP EXTENSION pg_stat_monitor; diff --git a/regression/sql/error.sql b/regression/sql/error.sql index 5102782..98870f2 100644 --- a/regression/sql/error.sql +++ b/regression/sql/error.sql @@ -9,6 +9,6 @@ BEGIN RAISE WARNING 'warning message'; END $$; -SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C"; +SELECT query, elevel, sqlcode, message FROM pg_stat_monitor ORDER BY query COLLATE "C",elevel; SELECT pg_stat_monitor_reset(); DROP EXTENSION pg_stat_monitor;