From 088ba7505798acbfca6334cc2645c93215ab14d8 Mon Sep 17 00:00:00 2001 From: Alper Kocatas Date: Wed, 14 May 2025 15:05:12 +0300 Subject: [PATCH] Add citus_nodes view (#7968) DESCRIPTION: Adds `citus_nodes` view that displays the node name, port, role, and "active" for nodes in the cluster. This PR adds `citus_nodes` view to the `pg_catalog` schema. The `citus_nodes` view is created in the `citus` schema and is used to display the node name, port, role, and active status of each node in the `pg_dist_node` table. The view is granted `SELECT` permission to the `PUBLIC` role and is set to the `pg_catalog` schema. Test cases were added to `multi_cluster_management` tests. structs.py was modified to add white spaces as `citus_indent` required. --------- Co-authored-by: Alper Kocatas --- .../distributed/sql/citus--13.0-1--13.1-1.sql | 1 + .../sql/downgrades/citus--13.1-1--13.0-1.sql | 1 + .../sql/udfs/citus_nodes/13.1-1.sql | 18 +++++ .../sql/udfs/citus_nodes/latest.sql | 18 +++++ .../expected/multi_cluster_management.out | 67 +++++++++++++++++++ src/test/regress/expected/multi_extension.out | 3 +- .../expected/upgrade_list_citus_objects.out | 3 +- src/test/regress/mitmscripts/structs.py | 3 + .../regress/sql/multi_cluster_management.sql | 29 ++++++++ 9 files changed, 141 insertions(+), 2 deletions(-) create mode 100644 src/backend/distributed/sql/udfs/citus_nodes/13.1-1.sql create mode 100644 src/backend/distributed/sql/udfs/citus_nodes/latest.sql diff --git a/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql b/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql index 4eefdf60a..4d0dc19a3 100644 --- a/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql +++ b/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql @@ -50,3 +50,4 @@ DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; #include "udfs/citus_is_primary_node/13.1-1.sql" #include "udfs/citus_stat_counters/13.1-1.sql" #include "udfs/citus_stat_counters_reset/13.1-1.sql" +#include "udfs/citus_nodes/13.1-1.sql" 
diff --git a/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql b/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql index 38f195743..8e18cd33b 100644 --- a/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql @@ -45,3 +45,4 @@ DROP FUNCTION citus_internal.is_replication_origin_tracking_active(); DROP VIEW pg_catalog.citus_stat_counters; DROP FUNCTION pg_catalog.citus_stat_counters(oid); DROP FUNCTION pg_catalog.citus_stat_counters_reset(oid); +DROP VIEW IF EXISTS pg_catalog.citus_nodes; diff --git a/src/backend/distributed/sql/udfs/citus_nodes/13.1-1.sql b/src/backend/distributed/sql/udfs/citus_nodes/13.1-1.sql new file mode 100644 index 000000000..4d129d79e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_nodes/13.1-1.sql @@ -0,0 +1,18 @@ +SET search_path = 'pg_catalog'; +DROP VIEW IF EXISTS pg_catalog.citus_nodes; + +CREATE OR REPLACE VIEW citus.citus_nodes AS +SELECT + nodename, + nodeport, + CASE + WHEN groupid = 0 THEN 'coordinator' + ELSE 'worker' + END AS role, + isactive AS active +FROM pg_dist_node; + +ALTER VIEW citus.citus_nodes SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_nodes TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_nodes/latest.sql b/src/backend/distributed/sql/udfs/citus_nodes/latest.sql new file mode 100644 index 000000000..4d129d79e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_nodes/latest.sql @@ -0,0 +1,18 @@ +SET search_path = 'pg_catalog'; +DROP VIEW IF EXISTS pg_catalog.citus_nodes; + +CREATE OR REPLACE VIEW citus.citus_nodes AS +SELECT + nodename, + nodeport, + CASE + WHEN groupid = 0 THEN 'coordinator' + ELSE 'worker' + END AS role, + isactive AS active +FROM pg_dist_node; + +ALTER VIEW citus.citus_nodes SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_nodes TO PUBLIC; + +RESET search_path; diff --git 
a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index e6634d5c6..352a2ddfe 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -72,6 +72,45 @@ SELECT master_get_active_worker_nodes(); (localhost,57637) (2 rows) +-- get all nodes +SELECT * from citus_nodes; + nodename | nodeport | role | active +--------------------------------------------------------------------- + localhost | 57637 | worker | t + localhost | 57638 | worker | t + localhost | 57636 | coordinator | t +(3 rows) + +-- get get active nodes +SELECT * from citus_nodes where active = 't'; + nodename | nodeport | role | active +--------------------------------------------------------------------- + localhost | 57637 | worker | t + localhost | 57638 | worker | t + localhost | 57636 | coordinator | t +(3 rows) + +-- get coordinator nodes +SELECT * from citus_nodes where role = 'coordinator'; + nodename | nodeport | role | active +--------------------------------------------------------------------- + localhost | 57636 | coordinator | t +(1 row) + +-- get worker nodes +SELECT * from citus_nodes where role = 'worker'; + nodename | nodeport | role | active +--------------------------------------------------------------------- + localhost | 57637 | worker | t + localhost | 57638 | worker | t +(2 rows) + +-- get nodes with unknown role +SELECT * from citus_nodes where role = 'foo'; + nodename | nodeport | role | active +--------------------------------------------------------------------- +(0 rows) + -- try to add a node that is already in the cluster SELECT * FROM master_add_node('localhost', :worker_1_port); master_add_node @@ -126,6 +165,34 @@ SELECT master_get_active_worker_nodes(); (localhost,57637) (1 row) +-- get get active nodes +SELECT * from citus_nodes where active = 't'; + nodename | nodeport | role | active 
+--------------------------------------------------------------------- + localhost | 57636 | coordinator | t + localhost | 57637 | worker | t +(2 rows) + +-- get get inactive nodes +SELECT * from citus_nodes where active = 'f'; + nodename | nodeport | role | active +--------------------------------------------------------------------- + localhost | 57638 | worker | f +(1 row) + +-- make sure non-superusers can access the view +CREATE ROLE normaluser; +SET ROLE normaluser; +SELECT * FROM citus_nodes; + nodename | nodeport | role | active +--------------------------------------------------------------------- + localhost | 57636 | coordinator | t + localhost | 57638 | worker | f + localhost | 57637 | worker | t +(3 rows) + +SET ROLE postgres; +DROP ROLE normaluser; -- add some shard placements to the cluster SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1; diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 88104a46a..c8468883b 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1483,8 +1483,9 @@ SELECT * FROM multi_extension.print_extension_changes(); | function citus_stat_counters(oid) SETOF record | function citus_stat_counters_reset(oid) void | function citus_unmark_object_distributed(oid,oid,integer,boolean) void + | view citus_nodes | view citus_stat_counters -(30 rows) +(31 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 70c71093d..3390dc387 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -380,6 +380,7 @@ ORDER BY 1; view citus_dist_stat_activity view citus_lock_waits view citus_locks + view citus_nodes view citus_schema.citus_schemas view 
citus_schema.citus_tables view citus_shard_indexes_on_worker @@ -392,6 +393,6 @@ ORDER BY 1; view citus_stat_tenants_local view pg_dist_shard_placement view time_partitions -(361 rows) +(362 rows) DROP TABLE extension_basic_types; diff --git a/src/test/regress/mitmscripts/structs.py b/src/test/regress/mitmscripts/structs.py index 2be3a6a2e..f799662d8 100644 --- a/src/test/regress/mitmscripts/structs.py +++ b/src/test/regress/mitmscripts/structs.py @@ -137,18 +137,21 @@ class Message: class SharedMessage(Message, metaclass=MessageMeta): "A message which could be sent by either the frontend or the backend" + _msgtypes = dict() _classes = dict() class FrontendMessage(Message, metaclass=MessageMeta): "A message which will only be sent be a backend" + _msgtypes = dict() _classes = dict() class BackendMessage(Message, metaclass=MessageMeta): "A message which will only be sent be a frontend" + _msgtypes = dict() _classes = dict() diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index a1e0e9b09..71078adab 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -32,6 +32,21 @@ SELECT result FROM run_command_on_workers('SELECT citus_is_primary_node()'); -- get the active nodes SELECT master_get_active_worker_nodes(); +-- get all nodes +SELECT * from citus_nodes; + +-- get get active nodes +SELECT * from citus_nodes where active = 't'; + +-- get coordinator nodes +SELECT * from citus_nodes where role = 'coordinator'; + +-- get worker nodes +SELECT * from citus_nodes where role = 'worker'; + +-- get nodes with unknown role +SELECT * from citus_nodes where role = 'foo'; + -- try to add a node that is already in the cluster SELECT * FROM master_add_node('localhost', :worker_1_port); @@ -51,6 +66,20 @@ SELECT citus_disable_node('localhost', :worker_2_port); SELECT public.wait_until_metadata_sync(20000); SELECT master_get_active_worker_nodes(); +-- 
get get active nodes +SELECT * from citus_nodes where active = 't'; + +-- get get inactive nodes +SELECT * from citus_nodes where active = 'f'; + +-- make sure non-superusers can access the view +CREATE ROLE normaluser; +SET ROLE normaluser; +SELECT * FROM citus_nodes; + +SET ROLE postgres; +DROP ROLE normaluser; + -- add some shard placements to the cluster SET citus.shard_count TO 16; SET citus.shard_replication_factor TO 1;