Compare commits
7 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 22ad698133 | |
| | 8e4dc948f0 | |
| | bb5e6a4e88 | |
| | e2d4603a13 | |
| | 1301f96146 | |
| | af251628b0 | |
| | ca771b9ad6 | |
@@ -5,7 +5,7 @@ jobs:
   build:
     name: pg-11-pgsm-pmm-integration-test
     runs-on: ubuntu-latest
-    timeout-minutes: 20
+    timeout-minutes: 30
     steps:
       - name: Clone QA Integration repository
         uses: actions/checkout@v2
@@ -18,7 +18,7 @@ jobs:
         run: echo 'The branch and Repo Name is' ${{ github.head_ref }} ${{ github.actor }}/pg_stat_monitor

       - name: Run PMM & PGSM Setup, E2E Tests
-        run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=11
+        run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=11 --pgstat-monitor-branch=REL_1_1_0

       - name: Get PMM-Agent Logs from the Container
         if: success() || failure() # run this step even if previous step failed
@@ -5,7 +5,7 @@ jobs:
   build:
     name: pg-12-pgsm-pmm-integration-test
     runs-on: ubuntu-latest
-    timeout-minutes: 20
+    timeout-minutes: 30
    steps:
      - name: Clone QA Integration repository
        uses: actions/checkout@v2
@@ -18,7 +18,7 @@ jobs:
        run: echo 'The branch and Repo Name is' ${{ github.head_ref }} ${{ github.actor }}/pg_stat_monitor

      - name: Run PMM & PGSM Setup, E2E Tests
-       run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=12
+       run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=12 --pgstat-monitor-branch=REL_1_1_0

      - name: Get PMM-Agent Logs from the Container
        if: success() || failure() # run this step even if previous step failed
@@ -5,7 +5,7 @@ jobs:
   build:
     name: pg-13-pgsm-pmm-integration-test
     runs-on: ubuntu-latest
-    timeout-minutes: 20
+    timeout-minutes: 30
    steps:
      - name: Clone QA Integration repository
        uses: actions/checkout@v2
@@ -18,7 +18,7 @@ jobs:
        run: echo 'The branch and Repo Name is' ${{ github.head_ref }} ${{ github.actor }}/pg_stat_monitor

      - name: Run PMM & PGSM Setup, E2E Tests
-       run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=13
+       run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=13 --pgstat-monitor-branch=REL_1_1_0

      - name: Get PMM-Agent Logs from the Container
        if: success() || failure() # run this step even if previous step failed
@@ -5,7 +5,7 @@ jobs:
   build:
     name: pg-14-pgsm-pmm-integration-test
     runs-on: ubuntu-latest
-    timeout-minutes: 20
+    timeout-minutes: 30
    steps:
      - name: Clone QA Integration repository
        uses: actions/checkout@v2
@@ -18,7 +18,7 @@ jobs:
        run: echo 'The branch and Repo Name is' ${{ github.head_ref }} ${{ github.actor }}/pg_stat_monitor

      - name: Run PMM & PGSM Setup, E2E Tests
-       run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=14
+       run: bash -xe ./pmm_pgsm_setup/pmm_pgsm_setup.sh --pgsql-version=14 --pgstat-monitor-branch=REL_1_1_0

      - name: Get PMM-Agent Logs from the Container
        if: success() || failure() # run this step even if previous step failed
@@ -2,7 +2,7 @@
    "name": "pg_stat_monitor",
    "abstract": "PostgreSQL Query Performance Monitoring Tool",
    "description": "pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL's contrib module pg_stat_statements. PostgreSQL’s pg_stat_statements provides the basic statistics, which is sometimes not enough. The major shortcoming in pg_stat_statements is that it accumulates all the queries and their statistics and does not provide aggregated statistics nor histogram information. In this case, a user would need to calculate the aggregates, which is quite an expensive operation.",
-   "version": "1.1.0-dev",
+   "version": "1.1.0",
    "maintainer": [
       "ibrar.ahmed@percona.com"
    ],
@@ -12,7 +12,7 @@
          "abstract": "PostgreSQL Query Performance Monitoring Tool",
          "file": "pg_stat_monitor--1.0.sql",
          "docfile": "README.md",
-         "version": "1.1.0-dev"
+         "version": "1.1.0"
       }
    },
    "prereqs": {
@@ -2,6 +2,26 @@

 Below is the complete list of release notes for every version of ``pg_stat_monitor``.

+## 1.1.0
+
+### Improvements
+
+[PG-474](https://jira.percona.com/browse/PG-474): Make pg_stat_monitor compiled with CLANG
+[PG-159](https://jira.percona.com/browse/PG-159): Change the bucket start time scheme to align with the bucket time size
+[PG-293](https://jira.percona.com/browse/PG-293): Add the ability to control features added on top of `pg_stat_monitor` using GUC (Grand Unified Configuration) parameters
+[PG-300](https://jira.percona.com/browse/PG-300): Improve compatibility with PMM by making QueryIDs persistent for the same queries across different buckets and regardless of the node / client a query is executed on.
+[PG-362](https://jira.percona.com/browse/PG-362): Fix the `pgsm_normalized_query` default value to provide query examples in the `pg_stat_monitor` view by default.
+[PG-439](https://jira.percona.com/browse/PG-439): Remove warning of comparison of unsigned enum expression
+
+### Bugs Fixed
+
+[PG-221](https://jira.percona.com/browse/PG-221): Fixed the issue with pg_stat_monitor crashing when querying JSON with parallel workers enabled
+[PG-289](https://jira.percona.com/browse/PG-289): Fixed the issue with pg_stat_monitor failing to build on C11 compilers by removing 'for' loop initial declarations
+[PG-449](https://jira.percona.com/browse/PG-449): Fix comments visibility by correcting the behavior of the `pgsm_extract_comments` parameter
+[PG-453](https://jira.percona.com/browse/PG-453): Fixed query normalization for INSERT statements in PostgreSQL 13 and earlier versions
+[PG-455](https://jira.percona.com/browse/PG-455): Fixed the issue with data collection for any value specified for `pgsm_bucket_time` parameter within the min / max range
+
+
 ## 1.0.1

 ### Bugs Fixed

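One of the notes above (PG-159) refers to aligning each bucket's start time with the bucket time size; the same flooring arithmetic appears later in this compare as `tv.tv_sec = tv.tv_sec - (tv.tv_sec % PGSM_BUCKET_TIME)`. A minimal standalone sketch of that arithmetic follows; the constants (`BUCKET_TIME`, the 10-bucket wrap) are made up for illustration and are not the extension's real settings:

```c
/* Illustrative only: flooring a timestamp to its bucket start. */
#include <stdio.h>
#include <stdint.h>

#define BUCKET_TIME 60          /* hypothetical bucket size in seconds */
#define MAX_BUCKETS 10          /* hypothetical number of buckets */

int main(void)
{
    uint64_t now = 1000;                                    /* sample timestamp */
    uint64_t bucket_start = now - (now % BUCKET_TIME);      /* 1000 -> 960 */
    uint64_t bucket_id = (now / BUCKET_TIME) % MAX_BUCKETS; /* which slot it lands in */

    printf("bucket start = %llu, bucket id = %llu\n",
           (unsigned long long) bucket_start, (unsigned long long) bucket_id);
    return 0;
}
```

With a 60-second bucket, a timestamp of 1000 floors to 960, so every statement recorded between seconds 960 and 1019 is attributed to the same bucket start time, regardless of when within that window the bucket was actually created.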
@@ -28,7 +28,7 @@

 PG_MODULE_MAGIC;

-#define BUILD_VERSION "1.1.0-dev"
+#define BUILD_VERSION "1.1.0"
 #define PG_STAT_STATEMENTS_COLS 53 /* maximum of above */
 #define PGSM_TEXT_FILE PGSTAT_STAT_PERMANENT_DIRECTORY "pg_stat_monitor_query"

@@ -2146,15 +2146,13 @@ static uint64
 get_next_wbucket(pgssSharedState *pgss)
 {
     struct timeval tv;
-    uint64      current_sec;
     uint64      current_bucket_sec;
     uint64      new_bucket_id;
     uint64      prev_bucket_id;
     struct tm  *lt;
-    char        file_name[1024];
+    bool        update_bucket = false;

     gettimeofday(&tv, NULL);
-    current_sec = (TimestampTz) tv.tv_sec - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY);
     current_bucket_sec = pg_atomic_read_u64(&pgss->prev_bucket_sec);

     /*
@@ -2172,53 +2170,61 @@ get_next_wbucket(pgssSharedState *pgss)
      * definitely make the while condition to fail, we can stop the loop as
      * another thread has already updated prev_bucket_sec.
      */
-    if ((current_sec - current_bucket_sec) < (uint64)PGSM_BUCKET_TIME)
+    while ((tv.tv_sec - (uint)current_bucket_sec) >= ((uint)PGSM_BUCKET_TIME))
     {
-        return pg_atomic_read_u64(&pgss->current_wbucket);
+        if (pg_atomic_compare_exchange_u64(&pgss->prev_bucket_sec, &current_bucket_sec, (uint64)tv.tv_sec))
+        {
+            update_bucket = true;
+            break;
+        }
+
+        current_bucket_sec = pg_atomic_read_u64(&pgss->prev_bucket_sec);
     }

-    new_bucket_id = (tv.tv_sec / PGSM_BUCKET_TIME) % PGSM_MAX_BUCKETS;
-
-    /* Update bucket id and retrieve the previous one. */
-    prev_bucket_id = pg_atomic_exchange_u64(&pgss->current_wbucket, new_bucket_id);
-
-    tv.tv_sec = (tv.tv_sec) - (tv.tv_sec % PGSM_BUCKET_TIME);
-    lt = localtime(&tv.tv_sec);
-
-    LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
-
-    /* Reconfirm that no other backend has created the bucket while we waited */
-    if (new_bucket_id == prev_bucket_id)
+    if (update_bucket)
     {
+        char        file_name[1024];
+
+        new_bucket_id = (tv.tv_sec / PGSM_BUCKET_TIME) % PGSM_MAX_BUCKETS;
+
+        /* Update bucket id and retrieve the previous one. */
+        prev_bucket_id = pg_atomic_exchange_u64(&pgss->current_wbucket, new_bucket_id);
+
+        LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
+        hash_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf);
+
+        if (pgss->overflow)
+        {
+            pgss->n_bucket_cycles += 1;
+            if (pgss->n_bucket_cycles >= PGSM_MAX_BUCKETS)
+            {
+                /*
+                 * A full rotation of PGSM_MAX_BUCKETS buckets happened since
+                 * we detected a query buffer overflow.
+                 * Reset overflow state and remove the dump file.
+                 */
+                pgss->overflow = false;
+                pgss->n_bucket_cycles = 0;
+                snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE);
+                unlink(file_name);
+            }
+        }
+
         LWLockRelease(pgss->lock);
+
+        tv.tv_sec = (tv.tv_sec) - (tv.tv_sec % PGSM_BUCKET_TIME);
+        lt = localtime(&tv.tv_sec);
+
+        /* Allign the value in prev_bucket_sec to the bucket start time */
+        pg_atomic_exchange_u64(&pgss->prev_bucket_sec, (uint64)tv.tv_sec);
+
+        snprintf(pgss->bucket_start_time[new_bucket_id], sizeof(pgss->bucket_start_time[new_bucket_id]),
+                 "%04d-%02d-%02d %02d:%02d:%02d", lt->tm_year + 1900, lt->tm_mon + 1, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);
+
         return new_bucket_id;
     }

-    hash_entry_dealloc(new_bucket_id, prev_bucket_id, pgss_qbuf);
-
-    if (pgss->overflow)
-    {
-        pgss->n_bucket_cycles += 1;
-        if (pgss->n_bucket_cycles >= PGSM_MAX_BUCKETS)
-        {
-            /*
-             * A full rotation of PGSM_MAX_BUCKETS buckets happened since we
-             * detected a query buffer overflow. Reset overflow state and
-             * remove the dump file.
-             */
-            pgss->overflow = false;
-            pgss->n_bucket_cycles = 0;
-            snprintf(file_name, 1024, "%s", PGSM_TEXT_FILE);
-            unlink(file_name);
-        }
-    }
-
-    snprintf(pgss->bucket_start_time[new_bucket_id], sizeof(pgss->bucket_start_time[new_bucket_id]),
-            "%04d-%02d-%02d %02d:%02d:%02d", lt->tm_year + 1900, lt->tm_mon + 1, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec);
-
-    LWLockRelease(pgss->lock);
-
-    return new_bucket_id;
+    return pg_atomic_read_u64(&pgss->current_wbucket);
 }

 #if PG_VERSION_NUM < 140000
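The hunk above replaces the old early-return expiry check with a compare-and-swap loop on `prev_bucket_sec`, so that exactly one backend wins the right to switch buckets (and deallocate the previous one) while every other backend keeps using the current bucket id. Below is a rough, standalone sketch of that claim pattern written with C11 atomics instead of PostgreSQL's `pg_atomic_*` API; the struct and function names are invented for the example and are not part of pg_stat_monitor:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define BUCKET_TIME 60          /* hypothetical bucket size, in seconds */

/* Invented stand-in for the state the extension keeps in shared memory. */
struct bucket_state
{
    _Atomic uint64_t prev_bucket_sec;   /* start time of the current bucket */
};

/*
 * Returns true if this caller won the race and must create the new bucket;
 * false if the bucket is still current or another caller already claimed
 * the switch.
 */
static bool
try_claim_bucket(struct bucket_state *state, uint64_t now_sec)
{
    uint64_t prev = atomic_load(&state->prev_bucket_sec);

    while (now_sec - prev >= BUCKET_TIME)
    {
        /*
         * Only one caller can advance prev_bucket_sec; a failed CAS reloads
         * `prev` with the value another caller stored, so the loop re-checks
         * the expiry condition and gives up if the bucket is now current.
         */
        if (atomic_compare_exchange_weak(&state->prev_bucket_sec, &prev, now_sec))
            return true;
    }
    return false;
}

int
main(void)
{
    struct bucket_state state = { .prev_bucket_sec = 0 };
    uint64_t now = (uint64_t) time(NULL);

    printf("first caller claims the switch: %d\n", try_claim_bucket(&state, now));
    printf("second caller within the same bucket: %d\n", try_claim_bucket(&state, now));
    return 0;
}
```

One small difference from the patched code: `atomic_compare_exchange_weak` writes the current value back into `prev` when it fails, whereas the hunk re-reads `prev_bucket_sec` explicitly with `pg_atomic_read_u64` before retrying; the loop behaves the same either way.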
@@ -2,7 +2,7 @@ CREATE EXTENSION pg_stat_monitor;
 SELECT pg_stat_monitor_version();
  pg_stat_monitor_version
 -------------------------
- 1.1.0-dev
+ 1.1.0
 (1 row)

 DROP EXTENSION pg_stat_monitor;