diff --git a/Makefile b/Makefile index a8a28c9..5ddf0c5 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,10 @@ # contrib/pg_stat_monitor/Makefile MODULE_big = pg_stat_monitor -OBJS = hash_query.o guc.o pg_stat_monitor.o pgsm_table_export.o $(WIN32RES) +OBJS = hash_query.o guc.o pg_stat_monitor.o $(WIN32RES) EXTENSION = pg_stat_monitor -DATA = pg_stat_monitor--2.0.sql pg_stat_monitor--1.0--2.0.sql pg_stat_monitor--2.0--2.1.sql pg_stat_monitor--2.1--2.2.sql pg_stat_monitor--2.2--2.3.sql pg_stat_monitor--2.3--2.4.sql +DATA = pg_stat_monitor--2.0.sql pg_stat_monitor--1.0--2.0.sql pg_stat_monitor--2.0--2.1.sql pg_stat_monitor--2.1--2.2.sql pg_stat_monitor--2.2--2.3.sql PGFILEDESC = "pg_stat_monitor - execution statistics of SQL statements" diff --git a/guc.c b/guc.c index 0f31b65..3438f46 100644 --- a/guc.c +++ b/guc.c @@ -36,6 +36,7 @@ bool pgsm_track_utility; bool pgsm_track_application_names; bool pgsm_enable_pgsm_query_id; int pgsm_track; +bool pgsm_enable_json_log; static int pgsm_overflow_target; /* Not used since 2.0 */ /* Check hooks to ensure histogram_min < histogram_max */ @@ -287,6 +288,18 @@ init_guc(void) NULL /* show_hook */ ); + DefineCustomBoolVariable("pg_stat_monitor.pgsm_enable_json_log", /* name */ + "Enable/Disable JSON logging of query statistics.", /* short_desc */ + NULL, /* long_desc */ + &pgsm_enable_json_log, /* value address */ + false, /* boot value */ + PGC_SUSET, /* context */ + 0, /* flags */ + NULL, /* check_hook */ + NULL, /* assign_hook */ + NULL /* show_hook */ + ); + } /* Maximum value must be greater or equal to minimum + 1.0 */ diff --git a/pg_stat_monitor--2.3--2.4.sql b/pg_stat_monitor--2.3--2.4.sql deleted file mode 100644 index 3a7f522..0000000 --- a/pg_stat_monitor--2.3--2.4.sql +++ /dev/null @@ -1,128 +0,0 @@ -/* contrib/pg_stat_monitor/pg_stat_monitor--2.3--2.4.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "ALTER EXTENSION pg_stat_monitor" to load this file. 
\quit - --- Create partitioned table with exact pg_stat_monitor API structure --- Table columns match the expected API exactly - direct table access -DROP VIEW IF EXISTS pg_stat_monitor CASCADE; - -CREATE TABLE pg_stat_monitor ( - bucket bigint, -- renamed from bucket_id - bucket_start_time timestamptz, - userid oid, - username text, - dbid oid, - datname text, - client_ip inet, - pgsm_query_id bigint, - queryid bigint, - toplevel boolean, - top_queryid bigint, - - -- Query texts limited to 1.5KB each - query text, - comments text, - planid bigint, - query_plan text, - top_query text, - - application_name text, - relations text[], -- array instead of comma-separated string - cmd_type int, - cmd_type_text text, -- computed at write time - elevel int, - sqlcode text, - message text, - - -- Execution stats - calls bigint, - total_exec_time double precision, - min_exec_time double precision, - max_exec_time double precision, - mean_exec_time double precision, - stddev_exec_time double precision, - rows bigint, - - -- Planning stats - plans bigint, - total_plan_time double precision, - min_plan_time double precision, - max_plan_time double precision, - mean_plan_time double precision, - stddev_plan_time double precision, - - -- Block stats - shared_blks_hit bigint, - shared_blks_read bigint, - shared_blks_dirtied bigint, - shared_blks_written bigint, - local_blks_hit bigint, - local_blks_read bigint, - local_blks_dirtied bigint, - local_blks_written bigint, - temp_blks_read bigint, - temp_blks_written bigint, - shared_blk_read_time double precision, - shared_blk_write_time double precision, - local_blk_read_time double precision, - local_blk_write_time double precision, - temp_blk_read_time double precision, - temp_blk_write_time double precision, - - -- System stats - cpu_user_time double precision, - cpu_sys_time double precision, - - -- WAL stats - wal_records bigint, - wal_fpi bigint, - wal_bytes numeric, - - -- JIT stats - jit_functions bigint, - jit_generation_time double precision, - jit_inlining_count bigint, - jit_inlining_time double precision, - jit_optimization_count bigint, - jit_optimization_time double precision, - jit_emission_count bigint, - jit_emission_time double precision, - jit_deform_count bigint, - jit_deform_time double precision, - - -- Response time histogram - resp_calls text[], - - -- Metadata - stats_since timestamptz, - minmax_stats_since timestamptz, - bucket_done boolean DEFAULT false, - exported_at timestamptz DEFAULT now() -) PARTITION BY RANGE (exported_at); - --- Create initial partition for today -CREATE TABLE pg_stat_monitor_default -PARTITION OF pg_stat_monitor DEFAULT; - --- Create indexes for query performance -CREATE INDEX ON pg_stat_monitor (queryid); -CREATE INDEX ON pg_stat_monitor (exported_at); -CREATE INDEX ON pg_stat_monitor (bucket, queryid); -- Composite for time-series queries - --- Configure memory and query text limits (set these manually in postgresql.conf): --- pg_stat_monitor.pgsm_max = '10MB' --- pg_stat_monitor.pgsm_max_buckets = 2 --- pg_stat_monitor.pgsm_query_shared_buffer = '1MB' --- pg_stat_monitor.pgsm_query_max_len = 1536 - --- Create user-callable export function following PostgreSQL extension best practices -CREATE OR REPLACE FUNCTION pg_stat_monitor_export() -RETURNS int -AS 'MODULE_PATHNAME', 'pg_stat_monitor_export' -LANGUAGE C STRICT VOLATILE; - --- Grant execute permission to public -GRANT EXECUTE ON FUNCTION pg_stat_monitor_export() TO PUBLIC; - --- Table structure matches API exactly - implementation complete \ No 
newline at end of file
diff --git a/pg_stat_monitor.c b/pg_stat_monitor.c
index d0f0db7..d957361 100644
--- a/pg_stat_monitor.c
+++ b/pg_stat_monitor.c
@@ -2572,6 +2572,9 @@ get_next_wbucket(pgsmSharedState *pgsm)
 
 	pgsm_lock_release(pgsm);
 
+	/* Log the previous bucket data as JSON if enabled */
+	pgsm_log_bucket_json(prev_bucket_id);
+
 	/* Allign the value in prev_bucket_sec to the bucket start time */
 	tv.tv_sec = (tv.tv_sec) - (tv.tv_sec % pgsm_bucket_time);
 
@@ -3962,3 +3965,57 @@ pgsm_lock_release(pgsmSharedState *pgsm)
 	disable_error_capture = false;
 	LWLockRelease(pgsm->lock);
 }
+
+/*
+ * Log bucket data as JSON to the PostgreSQL log.
+ */
+static void
+pgsm_log_bucket_json(uint64 bucket_id)
+{
+	pgsmSharedState *pgsm = pgsm_get_ss();
+	dsa_area   *query_dsa_area = get_dsa_area_for_query_text();
+	PGSM_HASH_SEQ_STATUS hstat;
+	pgsmEntry  *entry;
+	bool		first_entry = true;
+
+	if (!pgsm_enable_json_log)
+		return;
+
+	/* Start JSON object for the bucket */
+	elog(LOG, "[pg_stat_monitor] JSON export bucket %lu: {\"bucket_id\": %lu, \"queries\": [",
+		 (unsigned long) bucket_id, (unsigned long) bucket_id);
+
+	pgsm_lock_aquire(pgsm, LW_SHARED);
+
+	pgsm_hash_seq_init(&hstat, get_pgsmHash(), false);
+	while ((entry = pgsm_hash_seq_next(&hstat)) != NULL)
+	{
+		const char *query_txt = "";
+
+		/* Only export entries from the specified bucket */
+		if (entry->key.bucket_id != bucket_id)
+			continue;
+
+		if (!first_entry)
+			elog(LOG, "[pg_stat_monitor] JSON export: ,");
+		else
+			first_entry = false;
+
+		/* Query text lives in the query shared buffer; logged as-is, not JSON-escaped */
+		if (DsaPointerIsValid(entry->query_text.query_pos))
+			query_txt = dsa_get_address(query_dsa_area, entry->query_text.query_pos);
+
+		/* Log a simplified JSON object for each query */
+		elog(LOG, "[pg_stat_monitor] JSON export: {\"query_id\": %lu, \"calls\": %ld, \"total_time\": %.3f, \"query\": \"%s\"}",
+			 (unsigned long) entry->key.queryid,
+			 (long) entry->counters.calls.calls,
+			 entry->counters.time.total_time,
+			 query_txt);
+	}
+	pgsm_hash_seq_term(&hstat);
+
+	pgsm_lock_release(pgsm);
+
+	/* Close JSON array and object */
+	elog(LOG, "[pg_stat_monitor] JSON export: ]}");
+}
diff --git a/pg_stat_monitor.control b/pg_stat_monitor.control
index a59dcad..562bf53 100644
--- a/pg_stat_monitor.control
+++ b/pg_stat_monitor.control
@@ -1,5 +1,5 @@
 # pg_stat_monitor extension
 comment = 'The pg_stat_monitor is a PostgreSQL Query Performance Monitoring tool, based on PostgreSQL contrib module pg_stat_statements. pg_stat_monitor provides aggregated statistics, client information, plan details including plan, and histogram information.'
-default_version = '2.4'
+default_version = '2.3'
 module_pathname = '$libdir/pg_stat_monitor'
 relocatable = true
diff --git a/pg_stat_monitor.h b/pg_stat_monitor.h
index c5e8062..5550730 100644
--- a/pg_stat_monitor.h
+++ b/pg_stat_monitor.h
@@ -490,6 +490,7 @@ extern bool pgsm_track_utility;
 extern bool pgsm_track_application_names;
 extern bool pgsm_enable_pgsm_query_id;
 extern int	pgsm_track;
+extern bool pgsm_enable_json_log;
 
 #define DECLARE_HOOK(hook, ...)
\ static hook(__VA_ARGS__); @@ -504,5 +505,3 @@ void *pgsm_hash_seq_next(PGSM_HASH_SEQ_STATUS * hstat); void pgsm_hash_seq_term(PGSM_HASH_SEQ_STATUS * hstat); void pgsm_hash_delete_current(PGSM_HASH_SEQ_STATUS * hstat, PGSM_HASH_TABLE * shared_hash, void *key); -/* Table export functions */ -bool pgsm_export_bucket_to_table(uint64 bucket_id); diff --git a/pgsm_table_export.c b/pgsm_table_export.c deleted file mode 100644 index 7e1f562..0000000 --- a/pgsm_table_export.c +++ /dev/null @@ -1,723 +0,0 @@ -/* - * pgsm_table_export.c - Export pg_stat_monitor data to partitioned tables - * Low-memory implementation with query text limits and top queries filtering - */ -#include "postgres.h" -#include "pg_stat_monitor.h" -#include "executor/spi.h" -#include "utils/builtins.h" -#include "utils/timestamp.h" -#include "catalog/pg_authid.h" -#include "lib/dshash.h" -#include "utils/dsa.h" - -/* Helper macro for repeated truncate and escape pattern */ -#define APPEND_ESCAPED_STRING(sql, str, max_len) \ - do { \ - if ((str) && strlen(str) > 0) { \ - char *escaped = truncate_and_escape_string((str), (max_len)); \ - appendStringInfo(&(sql), "'%s', ", escaped); \ - pfree(escaped); \ - } else { \ - appendStringInfo(&(sql), "NULL, "); \ - } \ - } while(0) - -/* Function declarations */ -bool pgsm_export_bucket_to_table(uint64 bucket_id); - -/* Configuration */ -#define MAX_QUERY_LENGTH 1536 /* 1.5KB limit for query text */ -#define MAX_QUERIES_PER_BUCKET 300 /* Top 300 queries per bucket */ - -/* External references */ -extern pgsmSharedState *pgsm_get_ss(void); -extern dsa_area *get_dsa_area_for_query_text(void); - -/* Structure for sorting entries by total_time */ -typedef struct -{ - pgsmEntry *entry; - double total_time; -} EntryWithTime; - - -/* - * Helper function to truncate and escape strings for SQL - */ -static char * -truncate_and_escape_string(const char *str, int max_len) -{ - int len; - bool truncated; - char *result; - int src_len; - int j = 0; - - if (!str || *str == '\0') - return pstrdup(""); - - len = strlen(str); - truncated = (len > max_len); - - /* Simple approach: allocate generous buffer */ - src_len = truncated ? 
max_len : len; - result = palloc(src_len * 2 + 10); - - /* Copy and escape quotes */ - for (int i = 0; i < src_len; i++) - { - if (str[i] == '\'') - { - result[j++] = '\''; - result[j++] = '\''; - } - else - { - result[j++] = str[i]; - } - } - - /* Add truncation marker if needed */ - if (truncated) - { - result[j++] = '.'; - result[j++] = '.'; - result[j++] = '.'; - } - - result[j] = '\0'; - return result; -} - -/* - * Helper to convert response calls array to PostgreSQL array format - */ -static char * -resp_calls_to_array(int *resp_calls, int count) -{ - StringInfoData str; - char *result; - - - initStringInfo(&str); - - appendStringInfo(&str, "ARRAY["); - for (int i = 0; i < count; i++) - { - if (i > 0) - appendStringInfoChar(&str, ','); - appendStringInfo(&str, "%d", resp_calls[i]); - } - appendStringInfo(&str, "]"); - - /* Create a copy in the calling context and free the StringInfo */ - result = pstrdup(str.data); - - pfree(str.data); - - return result; -} - -/* - * Comparison function for sorting entries by total_time (descending) - */ -static int -compare_entries_by_time(const void *a, const void *b) -{ - EntryWithTime *ea = (EntryWithTime *)a; - EntryWithTime *eb = (EntryWithTime *)b; - - if (ea->total_time > eb->total_time) - return -1; - else if (ea->total_time < eb->total_time) - return 1; - else - return 0; -} - - -/* - * Helper function to collect and sort entries from a bucket - */ -static EntryWithTime * -collect_and_sort_entries(uint64 bucket_id, int *entry_count) -{ - PGSM_HASH_SEQ_STATUS hstat; - pgsmEntry *entry; - EntryWithTime *entries_array = NULL; - int array_size = 1000; /* Initial allocation */ - - *entry_count = 0; - - /* Allocate array for sorting entries */ - entries_array = palloc(array_size * sizeof(EntryWithTime)); - - /* First pass: collect all entries from this bucket */ - pgsm_hash_seq_init(&hstat, get_pgsmHash(), false); - while ((entry = pgsm_hash_seq_next(&hstat)) != NULL) - { - if (entry->key.bucket_id != bucket_id) - continue; - - /* Expand array if needed */ - if (*entry_count >= array_size) - { - array_size *= 2; - entries_array = repalloc(entries_array, array_size * sizeof(EntryWithTime)); - } - - entries_array[*entry_count].entry = entry; - entries_array[*entry_count].total_time = entry->counters.time.total_time; - (*entry_count)++; - } - pgsm_hash_seq_term(&hstat); - - elog(LOG, "pg_stat_monitor: Found %d entries for bucket %lu", *entry_count, (unsigned long)bucket_id); - - /* Sort entries by total_time descending */ - qsort(entries_array, *entry_count, sizeof(EntryWithTime), compare_entries_by_time); - - return entries_array; -} - -/* - * Helper function to build INSERT SQL statement - */ -/* - * Build the INSERT statement header with all column names - */ -static void -build_insert_header(StringInfo sql) -{ - appendStringInfo(sql, - "INSERT INTO pg_stat_monitor " - "(bucket, bucket_start_time, userid, username, dbid, datname, " - "client_ip, pgsm_query_id, queryid, toplevel, top_queryid, " - "query, comments, planid, query_plan, top_query, application_name, relations, " - "cmd_type, cmd_type_text, elevel, sqlcode, message, " - "calls, total_exec_time, min_exec_time, max_exec_time, mean_exec_time, stddev_exec_time, rows, " - "plans, total_plan_time, min_plan_time, max_plan_time, mean_plan_time, stddev_plan_time, " - "shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, " - "local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, " - "temp_blks_read, temp_blks_written, " - "shared_blk_read_time, 
shared_blk_write_time, local_blk_read_time, local_blk_write_time, " - "temp_blk_read_time, temp_blk_write_time, " - "cpu_user_time, cpu_sys_time, " - "wal_records, wal_fpi, wal_bytes, " - "jit_functions, jit_generation_time, jit_inlining_count, jit_inlining_time, " - "jit_optimization_count, jit_optimization_time, jit_emission_count, jit_emission_time, " - "jit_deform_count, jit_deform_time, " - "resp_calls, stats_since, minmax_stats_since, bucket_done, exported_at" - ") VALUES "); -} - -static char * -build_insert_statement(EntryWithTime *entries_array, int queries_to_export, - uint64 bucket_id, pgsmSharedState *pgsm, dsa_area *query_dsa_area) -{ - StringInfoData sql; - bool first = true; - - /* Build batch INSERT for top queries */ - initStringInfo(&sql); - build_insert_header(&sql); - - /* Export top N entries using original incremental approach */ - for (int i = 0; i < queries_to_export; i++) - { - char *query_txt = NULL; - char *parent_query_txt = NULL; - char *query_plan_txt = NULL; - char *relations_str = NULL; - double exec_stddev = 0.0; - double plan_stddev = 0.0; - double plan_mean __attribute__((unused)) = 0.0; - char *resp_calls_str; - pgsmEntry *entry; - - entry = entries_array[i].entry; - - if (!first) - appendStringInfoChar(&sql, ','); - first = false; - - /* Get query text - truncated to MAX_QUERY_LENGTH */ - if (DsaPointerIsValid(entry->query_text.query_pos)) - { - char *query_ptr = dsa_get_address(query_dsa_area, entry->query_text.query_pos); - query_txt = truncate_and_escape_string(query_ptr, MAX_QUERY_LENGTH); - } - else - { - query_txt = pstrdup("Query string not available"); - } - - /* Get parent query text if exists - also truncated */ - if (entry->key.parentid != UINT64CONST(0) && - DsaPointerIsValid(entry->counters.info.parent_query)) - { - char *query_ptr = dsa_get_address(query_dsa_area, entry->counters.info.parent_query); - parent_query_txt = truncate_and_escape_string(query_ptr, MAX_QUERY_LENGTH); - } - - /* Get relations as comma-separated string */ - if (entry->counters.info.num_relations > 0) - { - StringInfoData rel_str; - initStringInfo(&rel_str); - for (int j = 0; j < entry->counters.info.num_relations; j++) - { - if (j > 0) - appendStringInfoChar(&rel_str, ','); - appendStringInfo(&rel_str, "%s", entry->counters.info.relations[j]); - } - /* Create a copy in the calling context and free the StringInfo */ - relations_str = pstrdup(rel_str.data); - pfree(rel_str.data); - } - - /* Calculate stddev for exec and plan times */ - if (entry->counters.calls.calls > 1) - exec_stddev = sqrt(entry->counters.time.sum_var_time / entry->counters.calls.calls); - if (entry->counters.plancalls.calls > 1) - { - plan_stddev = sqrt(entry->counters.plantime.sum_var_time / entry->counters.plancalls.calls); - plan_mean = entry->counters.plantime.mean_time; - } - - /* Convert response calls array to PostgreSQL array */ - resp_calls_str = resp_calls_to_array(entry->counters.resp_calls, MAX_RESPONSE_BUCKET); - - /* Get query plan if exists - also truncated */ - if (entry->key.planid && entry->counters.planinfo.plan_text[0]) - { - query_plan_txt = truncate_and_escape_string(entry->counters.planinfo.plan_text, MAX_QUERY_LENGTH); - } - - /* Build the VALUES clause for this entry */ - appendStringInfo(&sql, - "(" - "%ld, ", /* bucket_id */ - (int64)bucket_id); - - /* bucket_start_time */ - if (pgsm->bucket_start_time[bucket_id] != 0) - appendStringInfo(&sql, "to_timestamp(%f)::timestamptz, ", - (double)(pgsm->bucket_start_time[bucket_id] / 1000000.0)); - else - appendStringInfo(&sql, 
"NULL, "); - - { - char *username_escaped = truncate_and_escape_string(entry->username, NAMEDATALEN); - char *datname_escaped = truncate_and_escape_string(entry->datname, NAMEDATALEN); - - appendStringInfo(&sql, "%u, '%s', %u, '%s', ", - entry->key.userid, username_escaped, entry->key.dbid, datname_escaped); - - pfree(username_escaped); - pfree(datname_escaped); - } - - /* client_ip */ - if (entry->key.ip != 0) - appendStringInfo(&sql, "'0.0.0.0'::inet + %ld, ", (int64)entry->key.ip); - else - appendStringInfo(&sql, "NULL, "); - - /* pgsm_query_id, queryid */ - appendStringInfo(&sql, "%ld, %ld, ", - (int64)entry->pgsm_query_id, - (int64)entry->key.queryid); - - /* toplevel */ - appendStringInfo(&sql, "%s, ", entry->key.toplevel ? "true" : "false"); - - /* top_queryid */ - if (entry->key.parentid != UINT64CONST(0)) - appendStringInfo(&sql, "%ld, ", (int64)entry->key.parentid); - else - appendStringInfo(&sql, "NULL, "); - - /* query */ - appendStringInfo(&sql, "'%s', ", query_txt); - - /* comments */ - APPEND_ESCAPED_STRING(sql, entry->counters.info.comments, 256); - - /* planid */ - if (entry->key.planid) - appendStringInfo(&sql, "%ld, ", (int64)entry->key.planid); - else - appendStringInfo(&sql, "NULL, "); - - /* query_plan */ - if (query_plan_txt) - appendStringInfo(&sql, "'%s', ", query_plan_txt); - else - appendStringInfo(&sql, "NULL, "); - - /* top_query */ - if (parent_query_txt) - appendStringInfo(&sql, "'%s', ", parent_query_txt); - else - appendStringInfo(&sql, "NULL, "); - - /* application_name */ - APPEND_ESCAPED_STRING(sql, entry->counters.info.application_name, NAMEDATALEN); - - /* relations - convert to PostgreSQL array format */ - if (relations_str && strlen(relations_str) > 0) - { - char *relations_copy; - char *token; - bool first_rel = true; - - appendStringInfo(&sql, "ARRAY["); - relations_copy = pstrdup(relations_str); - token = strtok(relations_copy, ","); - while (token != NULL) - { - if (!first_rel) - appendStringInfoChar(&sql, ','); - appendStringInfo(&sql, "'%s'", token); - first_rel = false; - token = strtok(NULL, ","); - } - appendStringInfo(&sql, "], "); - pfree(relations_copy); - } - else - appendStringInfo(&sql, "NULL, "); - - /* cmd_type */ - if (entry->counters.info.cmd_type != CMD_NOTHING) - appendStringInfo(&sql, "%d, ", entry->counters.info.cmd_type); - else - appendStringInfo(&sql, "NULL, "); - - /* cmd_type_text - convert to text */ - switch (entry->counters.info.cmd_type) - { - case CMD_SELECT: - appendStringInfo(&sql, "'SELECT', "); - break; - case CMD_UPDATE: - appendStringInfo(&sql, "'UPDATE', "); - break; - case CMD_INSERT: - appendStringInfo(&sql, "'INSERT', "); - break; - case CMD_DELETE: - appendStringInfo(&sql, "'DELETE', "); - break; - case CMD_UTILITY: - appendStringInfo(&sql, "'UTILITY', "); - break; - case CMD_NOTHING: - default: - appendStringInfo(&sql, "NULL, "); - break; - } - - /* elevel, sqlcode, message */ - appendStringInfo(&sql, "%ld, ", (long)entry->counters.error.elevel); - - APPEND_ESCAPED_STRING(sql, entry->counters.error.sqlcode, 5); - APPEND_ESCAPED_STRING(sql, entry->counters.error.message, 256); - - /* Execution stats */ - appendStringInfo(&sql, - "%ld, %.3f, %.3f, %.3f, %.3f, %.3f, %ld, ", - entry->counters.calls.calls, - entry->counters.time.total_time, - entry->counters.time.min_time, - entry->counters.time.max_time, - entry->counters.time.mean_time, - exec_stddev, - entry->counters.calls.rows); - - /* Planning stats */ - appendStringInfo(&sql, - "%ld, %.3f, %.3f, %.3f, %.3f, %.3f, ", - entry->counters.plancalls.calls, - 
entry->counters.plantime.total_time, - entry->counters.plantime.min_time, - entry->counters.plantime.max_time, - entry->counters.plantime.mean_time, - plan_stddev); - - /* Block stats */ - appendStringInfo(&sql, - "%ld, %ld, %ld, %ld, %ld, %ld, %ld, %ld, %ld, %ld, " - "%.3f, %.3f, %.3f, %.3f, %.3f, %.3f, ", - entry->counters.blocks.shared_blks_hit, - entry->counters.blocks.shared_blks_read, - entry->counters.blocks.shared_blks_dirtied, - entry->counters.blocks.shared_blks_written, - entry->counters.blocks.local_blks_hit, - entry->counters.blocks.local_blks_read, - entry->counters.blocks.local_blks_dirtied, - entry->counters.blocks.local_blks_written, - entry->counters.blocks.temp_blks_read, - entry->counters.blocks.temp_blks_written, - entry->counters.blocks.shared_blk_read_time, - entry->counters.blocks.shared_blk_write_time, - entry->counters.blocks.local_blk_read_time, - entry->counters.blocks.local_blk_write_time, - entry->counters.blocks.temp_blk_read_time, - entry->counters.blocks.temp_blk_write_time); - - /* System stats */ - appendStringInfo(&sql, - "%.3f, %.3f, ", - entry->counters.sysinfo.utime, - entry->counters.sysinfo.stime); - - /* WAL stats */ - appendStringInfo(&sql, - "%ld, %ld, %ld, ", - entry->counters.walusage.wal_records, - entry->counters.walusage.wal_fpi, - (int64)entry->counters.walusage.wal_bytes); - - /* JIT stats */ - appendStringInfo(&sql, - "%ld, %.3f, %ld, %.3f, %ld, %.3f, %ld, %.3f, %ld, %.3f, ", - entry->counters.jitinfo.jit_functions, - entry->counters.jitinfo.jit_generation_time, - entry->counters.jitinfo.jit_inlining_count, - entry->counters.jitinfo.jit_inlining_time, - entry->counters.jitinfo.jit_optimization_count, - entry->counters.jitinfo.jit_optimization_time, - entry->counters.jitinfo.jit_emission_count, - entry->counters.jitinfo.jit_emission_time, - entry->counters.jitinfo.jit_deform_count, - entry->counters.jitinfo.jit_deform_time); - - /* resp_calls */ - appendStringInfo(&sql, "%s, ", resp_calls_str); - - /* stats_since, minmax_stats_since */ - if (entry->stats_since != 0) - appendStringInfo(&sql, "to_timestamp(%f)::timestamptz, ", - (double)(entry->stats_since / 1000000.0)); - else - appendStringInfo(&sql, "NULL, "); - - if (entry->minmax_stats_since != 0) - appendStringInfo(&sql, "to_timestamp(%f)::timestamptz, ", - (double)(entry->minmax_stats_since / 1000000.0)); - else - appendStringInfo(&sql, "NULL, "); - - /* bucket_done - mark as false since bucket is still active */ - appendStringInfo(&sql, "false, "); - - /* exported_at - use current timestamp */ - appendStringInfo(&sql, "now())"); - - /* Clean up */ - if (query_txt) - pfree(query_txt); - if (parent_query_txt) - pfree(parent_query_txt); - if (query_plan_txt) - pfree(query_plan_txt); - if (relations_str) - pfree(relations_str); - if (resp_calls_str) - pfree(resp_calls_str); - } - - { - char *result = pstrdup(sql.data); - /* StringInfo manages its own memory - don't call pfree on sql.data */ - return result; - } -} - -/* - * Helper function to execute SQL statement - */ -static bool -execute_export_sql(const char *sql, uint64 bucket_id, int exported) -{ - int spi_result; - int ret; - bool export_successful = false; - bool spi_connected = false; - - elog(LOG, "pg_stat_monitor: About to export %d queries for bucket %lu", exported, (unsigned long)bucket_id); - - /* Attempt SPI execution with proper error handling */ - PG_TRY(); - { - /* Try to connect to SPI manager */ - spi_result = SPI_connect(); - if (spi_result != SPI_OK_CONNECT) - { - elog(DEBUG1, "pg_stat_monitor: Deferring export of 
bucket %lu (SPI connect failed: %d)", - (unsigned long)bucket_id, spi_result); - export_successful = false; - } - else - { - spi_connected = true; - - /* Execute the INSERT statement */ - elog(LOG, "pg_stat_monitor: Executing SQL for bucket %lu", (unsigned long)bucket_id); - elog(LOG, "pg_stat_monitor: FULL SQL: %s", sql); - - /* Add pre-execution debugging */ - elog(LOG, "pg_stat_monitor: About to call SPI_execute"); - elog(LOG, "pg_stat_monitor: SPI connection status: %d", SPI_result); - - ret = SPI_execute(sql, false, 0); - - elog(LOG, "pg_stat_monitor: SPI_execute returned: %d", ret); - elog(LOG, "pg_stat_monitor: SPI_OK_INSERT value is: %d", SPI_OK_INSERT); - elog(LOG, "pg_stat_monitor: SPI_processed: %lu", (unsigned long)SPI_processed); - - /* Check result */ - if (ret == SPI_OK_INSERT) - { - elog(LOG, "pg_stat_monitor: Successfully exported %d queries for bucket %lu", - exported, (unsigned long)bucket_id); - export_successful = true; - } - else - { - elog(WARNING, "pg_stat_monitor: Failed to insert bucket %lu data, SPI result: %d", - (unsigned long)bucket_id, ret); - export_successful = false; - } - - /* Always call SPI_finish() after successful SPI_connect() */ - SPI_finish(); - spi_connected = false; - } - } - PG_CATCH(); - { - /* Handle SPI context errors gracefully */ - ErrorData *errdata; - - /* Clean up SPI connection if it was established */ - if (spi_connected) - { - SPI_finish(); - } - - errdata = CopyErrorData(); - elog(DEBUG1, "pg_stat_monitor: Export deferred for bucket %lu: %s", - (unsigned long)bucket_id, errdata->message); - FreeErrorData(errdata); - FlushErrorState(); - export_successful = false; - } - PG_END_TRY(); - - return export_successful; -} - -/* - * Export bucket data to partitioned table - * Called from ExecutorEnd when a bucket needs export - * Returns true if export succeeded or no data to export, false if deferred - */ -bool -pgsm_export_bucket_to_table(uint64 bucket_id) -{ - EntryWithTime *entries_array = NULL; - char *sql = NULL; - pgsmSharedState *pgsm; - dsa_area *query_dsa_area; - int entry_count, queries_to_export; - bool export_successful = false; - - /* Log when export function is called */ - elog(DEBUG1, "pg_stat_monitor: Export function called for bucket %lu", (unsigned long)bucket_id); - - /* Get shared memory areas */ - pgsm = pgsm_get_ss(); - query_dsa_area = get_dsa_area_for_query_text(); - - /* Step 1: Collect and sort entries from this bucket */ - entries_array = collect_and_sort_entries(bucket_id, &entry_count); - - /* Step 2: Limit to top N queries */ - queries_to_export = (entry_count > MAX_QUERIES_PER_BUCKET) ? - MAX_QUERIES_PER_BUCKET : entry_count; - - if (queries_to_export == 0) - { - elog(DEBUG1, "pg_stat_monitor: No data to export for bucket %lu (found %d entries)", - (unsigned long)bucket_id, entry_count); - export_successful = true; /* No data to export is success */ - goto cleanup; - } - - /* Step 3: Build INSERT SQL statement */ - sql = build_insert_statement(entries_array, queries_to_export, bucket_id, pgsm, query_dsa_area); - - /* Step 4: Execute SQL statement */ - export_successful = execute_export_sql(sql, bucket_id, queries_to_export); - elog(LOG, "pg_stat_monitor: execute_export_sql returned %s for bucket %lu", - export_successful ? 
"true" : "false", (unsigned long)bucket_id); - -cleanup: - /* Single cleanup point to avoid double-free errors */ - if (sql) - pfree(sql); - if (entries_array) - pfree(entries_array); - - elog(LOG, "pg_stat_monitor: pgsm_export_bucket_to_table returning %s for bucket %lu", - export_successful ? "true" : "false", (unsigned long)bucket_id); - return export_successful; -} - -/* - * User-callable export function following PostgreSQL extension best practices - * This is the safe way to trigger exports - from a proper transaction context - * Similar to pg_stat_statements_reset() pattern - */ -PG_FUNCTION_INFO_V1(pg_stat_monitor_export); - -Datum -pg_stat_monitor_export(PG_FUNCTION_ARGS) -{ - pgsmSharedState *pgsm = pgsm_get_ss(); - uint64 bucket_to_export; - uint64 current_bucket_id; - int exported_buckets = 0; - - /* Check if user has appropriate permissions */ - if (!superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to export pg_stat_monitor data"))); - - /* Get current bucket ID */ - current_bucket_id = pg_atomic_read_u64(&pgsm->current_wbucket); - - /* Check for pending exports */ - bucket_to_export = pg_atomic_read_u64(&pgsm->bucket_to_export); - - /* Determine which bucket to export */ - { - uint64 target_bucket = (bucket_to_export == (uint64)-1) ? current_bucket_id : bucket_to_export; - - /* Export the target bucket */ - if (pgsm_export_bucket_to_table(target_bucket)) - { - exported_buckets = 1; - /* Clear export flag if it was a pending export */ - if (bucket_to_export != (uint64)-1) - pg_atomic_compare_exchange_u64(&pgsm->bucket_to_export, &bucket_to_export, (uint64)-1); - } - } - - PG_RETURN_INT32(exported_buckets); -} \ No newline at end of file diff --git a/regression.diffs b/regression.diffs new file mode 100644 index 0000000..0db5bf6 --- /dev/null +++ b/regression.diffs @@ -0,0 +1,134 @@ +diff -U3 /Users/oliverrice/Documents/supabase/pg_stat_monitor/regression/expected/basic.out /Users/oliverrice/Documents/supabase/pg_stat_monitor/results/basic.out +--- /Users/oliverrice/Documents/supabase/pg_stat_monitor/regression/expected/basic.out 2025-09-20 13:48:01 ++++ /Users/oliverrice/Documents/supabase/pg_stat_monitor/results/basic.out 2025-09-20 13:54:55 +@@ -12,11 +12,9 @@ + (1 row) + + SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C"; +- query +--------------------------------- +- SELECT 1 AS num +- SELECT pg_stat_monitor_reset() +-(2 rows) ++ query ++------- ++(0 rows) + + SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +diff -U3 /Users/oliverrice/Documents/supabase/pg_stat_monitor/regression/expected/counters.out /Users/oliverrice/Documents/supabase/pg_stat_monitor/results/counters.out +--- /Users/oliverrice/Documents/supabase/pg_stat_monitor/regression/expected/counters.out 2025-09-20 13:48:01 ++++ /Users/oliverrice/Documents/supabase/pg_stat_monitor/results/counters.out 2025-09-20 13:54:55 +@@ -37,11 +37,9 @@ + (0 rows) + + SELECT query, sum(calls) as calls FROM pg_stat_monitor GROUP BY query ORDER BY query COLLATE "C"; +- query | calls +----------------------------------------------------------------------------------+------- +- SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 4 +- SELECT pg_stat_monitor_reset() | 1 +-(2 rows) ++ query | calls ++-------+------- ++(0 rows) + + SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +@@ -60,21 +58,9 @@ + end loop; + end $$; + SELECT query, sum(calls) as calls FROM pg_stat_monitor GROUP BY query ORDER BY query COLLATE "C"; +- 
query | calls +----------------------------------------------------------------------------------------------------+------- +- SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a | 1000 +- SELECT pg_stat_monitor_reset() | 1 +- do $$ +| 1 +- declare +| +- n integer:= 1; +| +- begin +| +- loop +| +- PERFORM a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a;+| +- exit when n = 1000; +| +- n := n + 1; +| +- end loop; +| +- end $$ | +-(3 rows) ++ query | calls ++-------+------- ++(0 rows) + + DROP TABLE t1; + DROP TABLE t2; +diff -U3 /Users/oliverrice/Documents/supabase/pg_stat_monitor/regression/expected/relations.out /Users/oliverrice/Documents/supabase/pg_stat_monitor/results/relations.out +--- /Users/oliverrice/Documents/supabase/pg_stat_monitor/regression/expected/relations.out 2025-09-20 13:48:01 ++++ /Users/oliverrice/Documents/supabase/pg_stat_monitor/results/relations.out 2025-09-20 13:54:55 +@@ -37,14 +37,9 @@ + (0 rows) + + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; +- query | relations +---------------------------------------+--------------------------------------------------- +- SELECT * FROM foo1 | {public.foo1} +- SELECT * FROM foo1, foo2 | {public.foo1,public.foo2} +- SELECT * FROM foo1, foo2, foo3 | {public.foo1,public.foo2,public.foo3} +- SELECT * FROM foo1, foo2, foo3, foo4 | {public.foo1,public.foo2,public.foo3,public.foo4} +- SELECT pg_stat_monitor_reset() | +-(5 rows) ++ query | relations ++-------+----------- ++(0 rows) + + SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +@@ -88,14 +83,9 @@ + (0 rows) + + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; +- query | relations +-----------------------------------------------------------+------------------------------------------- +- SELECT * FROM sch1.foo1 | {sch1.foo1} +- SELECT * FROM sch1.foo1, sch2.foo2 | {sch1.foo1,sch2.foo2} +- SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3 | {sch1.foo1,sch2.foo2,sch3.foo3} +- SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4 | {sch1.foo1,sch2.foo2,sch3.foo3,sch4.foo4} +- SELECT pg_stat_monitor_reset() | +-(5 rows) ++ query | relations ++-------+----------- ++(0 rows) + + SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +@@ -120,12 +110,9 @@ + (0 rows) + + SELECT query, relations from pg_stat_monitor ORDER BY query; +- query | relations +-------------------------------------------------+----------------------------------------------- +- SELECT * FROM sch1.foo1, foo1 | {sch1.foo1,public.foo1} +- SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2 | {sch1.foo1,sch2.foo2,public.foo1,public.foo2} +- SELECT pg_stat_monitor_reset() | +-(3 rows) ++ query | relations ++-------+----------- ++(0 rows) + + SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +@@ -165,14 +152,9 @@ + (0 rows) + + SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; +- query | relations +---------------------------------+----------------------------------------------------------------------------------------------- +- SELECT * FROM v1 | {public.v1*,public.foo1} +- SELECT * FROM v1,v2 | {public.v1*,public.foo1,public.v2*,public.foo2} +- SELECT * FROM v1,v2,v3 | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3} +- SELECT * FROM v1,v2,v3,v4 | {public.v1*,public.foo1,public.v2*,public.foo2,public.v3*,public.foo3,public.v4*,public.foo4} +- SELECT pg_stat_monitor_reset() | +-(5 rows) ++ query | relations ++-------+----------- ++(0 rows) + + SELECT 
pg_stat_monitor_reset(); + pg_stat_monitor_reset diff --git a/regression.out b/regression.out new file mode 100644 index 0000000..ecd1ad9 --- /dev/null +++ b/regression.out @@ -0,0 +1,8 @@ +# using postmaster on Unix socket, default port +not ok 1 - basic 41 ms +not ok 2 - counters 36 ms +not ok 3 - relations 41 ms +1..3 +# 3 of 3 tests failed. +# The differences that caused some tests to fail can be viewed in the file "/Users/oliverrice/Documents/supabase/pg_stat_monitor/regression.diffs". +# A copy of the test summary that you see above is saved in the file "/Users/oliverrice/Documents/supabase/pg_stat_monitor/regression.out". diff --git a/results/basic.out b/results/basic.out new file mode 100644 index 0000000..d2b2034 --- /dev/null +++ b/results/basic.out @@ -0,0 +1,25 @@ +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +SELECT 1 AS num; + num +----- + 1 +(1 row) + +SELECT query FROM pg_stat_monitor ORDER BY query COLLATE "C"; + query +------- +(0 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP EXTENSION pg_stat_monitor; diff --git a/results/counters.out b/results/counters.out new file mode 100644 index 0000000..7eef9c6 --- /dev/null +++ b/results/counters.out @@ -0,0 +1,69 @@ +CREATE EXTENSION pg_stat_monitor; +Set pg_stat_monitor.pgsm_track='all'; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +CREATE TABLE t1 (a INTEGER); +CREATE TABLE t2 (b INTEGER); +CREATE TABLE t3 (c INTEGER); +CREATE TABLE t4 (d INTEGER); +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT query, sum(calls) as calls FROM pg_stat_monitor GROUP BY query ORDER BY query COLLATE "C"; + query | calls +-------+------- +(0 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +do $$ +declare + n integer:= 1; +begin + loop + PERFORM a,b,c,d FROM t1, t2, t3, t4 WHERE t1.a = t2.b AND t3.c = t4.d ORDER BY a; + exit when n = 1000; + n := n + 1; + end loop; +end $$; +SELECT query, sum(calls) as calls FROM pg_stat_monitor GROUP BY query ORDER BY query COLLATE "C"; + query | calls +-------+------- +(0 rows) + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; +DROP EXTENSION pg_stat_monitor; diff --git a/results/relations.out b/results/relations.out new file mode 100644 index 0000000..9ef38e0 --- /dev/null +++ b/results/relations.out @@ -0,0 +1,181 @@ +CREATE EXTENSION pg_stat_monitor; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +CREATE TABLE foo1(a int); +CREATE TABLE foo2(b int); +CREATE TABLE foo3(c int); +CREATE TABLE foo4(d int); +-- test the simple table names +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +SELECT * FROM foo1; + a +--- +(0 rows) + +SELECT * FROM foo1, foo2; + a | b +---+--- +(0 rows) + +SELECT * FROM foo1, foo2, 
foo3; + a | b | c +---+---+--- +(0 rows) + +SELECT * FROM foo1, foo2, foo3, foo4; + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; + query | relations +-------+----------- +(0 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +-- test the schema qualified table +CREATE SCHEMA sch1; +CREATE SCHEMA sch2; +CREATE SCHEMA sch3; +CREATE SCHEMA sch4; +CREATE TABLE sch1.foo1(a int); +CREATE TABLE sch2.foo2(b int); +CREATE TABLE sch3.foo3(c int); +CREATE TABLE sch4.foo4(d int); +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +SELECT * FROM sch1.foo1; + a +--- +(0 rows) + +SELECT * FROM sch1.foo1, sch2.foo2; + a | b +---+--- +(0 rows) + +SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3; + a | b | c +---+---+--- +(0 rows) + +SELECT * FROM sch1.foo1, sch2.foo2, sch3.foo3, sch4.foo4; + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; + query | relations +-------+----------- +(0 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +SELECT * FROM sch1.foo1, foo1; + a | a +---+--- +(0 rows) + +SELECT * FROM sch1.foo1, sch2.foo2, foo1, foo2; + a | b | a | b +---+---+---+--- +(0 rows) + +SELECT query, relations from pg_stat_monitor ORDER BY query; + query | relations +-------+----------- +(0 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +-- test the view +CREATE VIEW v1 AS SELECT * from foo1; +CREATE VIEW v2 AS SELECT * from foo1,foo2; +CREATE VIEW v3 AS SELECT * from foo1,foo2,foo3; +CREATE VIEW v4 AS SELECT * from foo1,foo2,foo3,foo4; +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +SELECT * FROM v1; + a +--- +(0 rows) + +SELECT * FROM v1,v2; + a | a | b +---+---+--- +(0 rows) + +SELECT * FROM v1,v2,v3; + a | a | b | a | b | c +---+---+---+---+---+--- +(0 rows) + +SELECT * FROM v1,v2,v3,v4; + a | a | b | a | b | c | a | b | c | d +---+---+---+---+---+---+---+---+---+--- +(0 rows) + +SELECT query, relations from pg_stat_monitor ORDER BY query collate "C"; + query | relations +-------+----------- +(0 rows) + +SELECT pg_stat_monitor_reset(); + pg_stat_monitor_reset +----------------------- + +(1 row) + +DROP VIEW v1; +DROP VIEW v2; +DROP VIEW v3; +DROP VIEW v4; +DROP TABLE foo1; +DROP TABLE foo2; +DROP TABLE foo3; +DROP TABLE foo4; +DROP TABLE sch1.foo1; +DROP TABLE sch2.foo2; +DROP TABLE sch3.foo3; +DROP TABLE sch4.foo4; +DROP SCHEMA sch1; +DROP SCHEMA sch2; +DROP SCHEMA sch3; +DROP SCHEMA sch4; +DROP EXTENSION pg_stat_monitor; diff --git a/test_lifecycle.sql b/test_lifecycle.sql deleted file mode 100644 index 90523f7..0000000 --- a/test_lifecycle.sql +++ /dev/null @@ -1,67 +0,0 @@ --- Integration test for pg_stat_monitor low-memory fork --- Tests table export functionality, API compatibility, and memory configuration - --- Reset stats and verify clean state -SELECT pg_stat_monitor_reset(); - --- Wait briefly for bucket to initialize -\! 
sleep 1 - --- Generate some test queries to capture -SET pg_stat_monitor.pgsm_track_utility = on; -SELECT count(*) FROM pg_tables WHERE schemaname = 'public'; -SELECT 'lifecycle test query' as test_message, current_timestamp; - --- Verify table structure matches expected API -\d pg_stat_monitor - --- Test key column existence -SELECT EXISTS ( - SELECT 1 FROM information_schema.columns - WHERE table_name = 'pg_stat_monitor' AND column_name = 'bucket' -) as has_bucket_column; - -SELECT EXISTS ( - SELECT 1 FROM information_schema.columns - WHERE table_name = 'pg_stat_monitor' AND column_name = 'cmd_type_text' -) as has_cmd_type_text_column; - --- Test export function exists and is callable -SELECT EXISTS ( - SELECT 1 FROM pg_proc - WHERE proname = 'pg_stat_monitor_export' -) as export_function_exists; - --- Verify expected column count (should be 72 columns total) -SELECT count(*) as total_columns -FROM information_schema.columns -WHERE table_name = 'pg_stat_monitor'; - --- Test that queries are being captured in memory -SELECT CASE - WHEN EXISTS ( - SELECT 1 FROM pg_stat_monitor_internal() - WHERE query LIKE '%lifecycle test query%' - ) - THEN 'SUCCESS: Queries captured in memory' - ELSE 'ERROR: No queries found in memory' -END as capture_status; - --- Test export function is callable -SELECT CASE - WHEN pg_stat_monitor_export() >= 0 - THEN 'Export function callable: YES' - ELSE 'Export function callable: NO' -END as export_function_test; - --- Verify bucket system is working -SELECT CASE - WHEN count(*) > 0 - THEN 'SUCCESS: Bucket system working' - ELSE 'ERROR: No buckets found' -END as bucket_system_status -FROM pg_stat_monitor_internal() -WHERE bucket_start_time IS NOT NULL; - --- Final success message -SELECT 'SUCCESS: Simplified codebase maintains full compatibility and functionality' as message; \ No newline at end of file
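
Usage sketch (illustrative, not part of the patch): the new pg_stat_monitor.pgsm_enable_json_log GUC defined in guc.c is a PGC_SUSET boolean, so a superuser can enable it at runtime or persist it with ALTER SYSTEM. Once it is on, each bucket switch in get_next_wbucket() calls pgsm_log_bucket_json(), which emits opening and closing LOG lines for the bucket plus one LOG line per tracked statement. The SQL below and the sample log lines are only an example of the format hard-coded in pgsm_log_bucket_json(); the actual prefix of each line depends on log_line_prefix.

    -- enable JSON logging of bucket statistics (requires superuser)
    ALTER SYSTEM SET pg_stat_monitor.pgsm_enable_json_log = on;
    SELECT pg_reload_conf();

    -- expected shape of the resulting server-log lines (values are illustrative):
    -- LOG:  [pg_stat_monitor] JSON export bucket 3: {"bucket_id": 3, "queries": [
    -- LOG:  [pg_stat_monitor] JSON export: {"query_id": 123456789, "calls": 4, "total_time": 1.234, "query": "SELECT 1"}
    -- LOG:  [pg_stat_monitor] JSON export: ]}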