Add citus_nodes view (#7968)

DESCRIPTION: Adds a `citus_nodes` view that displays the node name, port,
role, and active status of the nodes in the cluster.

This PR adds a `citus_nodes` view to the `pg_catalog` schema. The
`citus_nodes` view displays the node name, port, role, and active status
of each node in the `pg_dist_node` table.

Because new objects cannot be created directly in `pg_catalog`, the view
is created in the `citus` schema and then moved to `pg_catalog` with
`ALTER VIEW ... SET SCHEMA`. `SELECT` on the view is granted to the
`PUBLIC` role so that non-superusers can query it as well.

Test cases were added to the `multi_cluster_management` tests.

`structs.py` was modified to add the whitespace that `citus_indent`
requires.
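
As a quick usage sketch (hypothetical host names and ports; the exact
output depends on the cluster), the view turns "which workers are down?"
into a one-liner:

-- list workers currently marked inactive
SELECT nodename, nodeport FROM citus_nodes WHERE role = 'worker' AND NOT active;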

---------

Co-authored-by: Alper Kocatas <alperkocatas@microsoft.com>
pull/7997/head
Alper Kocatas 2025-05-14 15:05:12 +03:00 committed by Onur Tirtir
parent a18040869a
commit 662628fe7d
9 changed files with 141 additions and 2 deletions

View File

@@ -50,3 +50,4 @@ DROP VIEW IF EXISTS pg_catalog.citus_lock_waits;
#include "udfs/citus_is_primary_node/13.1-1.sql"
#include "udfs/citus_stat_counters/13.1-1.sql"
#include "udfs/citus_stat_counters_reset/13.1-1.sql"
#include "udfs/citus_nodes/13.1-1.sql"

View File

@@ -45,3 +45,4 @@ DROP FUNCTION citus_internal.is_replication_origin_tracking_active();
DROP VIEW pg_catalog.citus_stat_counters;
DROP FUNCTION pg_catalog.citus_stat_counters(oid);
DROP FUNCTION pg_catalog.citus_stat_counters_reset(oid);
DROP VIEW IF EXISTS pg_catalog.citus_nodes;

View File

@@ -0,0 +1,18 @@
SET search_path = 'pg_catalog';
DROP VIEW IF EXISTS pg_catalog.citus_nodes;
CREATE OR REPLACE VIEW citus.citus_nodes AS
SELECT
    nodename,
    nodeport,
    CASE
        WHEN groupid = 0 THEN 'coordinator'
        ELSE 'worker'
    END AS role,
    isactive AS active
FROM pg_dist_node;
ALTER VIEW citus.citus_nodes SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_nodes TO PUBLIC;
RESET search_path;
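
Two notes on the definition above. First, the same SQL appears twice in
this commit because Citus keeps a versioned migration file for each
object (the `13.1-1.sql` included earlier) plus an identical latest.sql
copy. Second, since the coordinator only shows up in `pg_dist_node` once
it has been registered (e.g. via `citus_set_coordinator_host`), the
`role` column gives a cheap way to check for that; a sketch, not part of
this PR:

-- does the metadata know about the coordinator yet?
SELECT EXISTS (SELECT 1 FROM citus_nodes WHERE role = 'coordinator') AS coordinator_registered;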

View File

@@ -0,0 +1,18 @@
SET search_path = 'pg_catalog';
DROP VIEW IF EXISTS pg_catalog.citus_nodes;
CREATE OR REPLACE VIEW citus.citus_nodes AS
SELECT
    nodename,
    nodeport,
    CASE
        WHEN groupid = 0 THEN 'coordinator'
        ELSE 'worker'
    END AS role,
    isactive AS active
FROM pg_dist_node;
ALTER VIEW citus.citus_nodes SET SCHEMA pg_catalog;
GRANT SELECT ON pg_catalog.citus_nodes TO PUBLIC;
RESET search_path;

View File

@@ -72,6 +72,45 @@ SELECT master_get_active_worker_nodes();
(localhost,57637)
(2 rows)
-- get all nodes
SELECT * from citus_nodes;
nodename | nodeport | role | active
---------------------------------------------------------------------
localhost | 57637 | worker | t
localhost | 57638 | worker | t
localhost | 57636 | coordinator | t
(3 rows)
-- get active nodes
SELECT * from citus_nodes where active = 't';
nodename | nodeport | role | active
---------------------------------------------------------------------
localhost | 57637 | worker | t
localhost | 57638 | worker | t
localhost | 57636 | coordinator | t
(3 rows)
-- get coordinator nodes
SELECT * from citus_nodes where role = 'coordinator';
nodename | nodeport | role | active
---------------------------------------------------------------------
localhost | 57636 | coordinator | t
(1 row)
-- get worker nodes
SELECT * from citus_nodes where role = 'worker';
nodename | nodeport | role | active
---------------------------------------------------------------------
localhost | 57637 | worker | t
localhost | 57638 | worker | t
(2 rows)
-- get nodes with unknown role
SELECT * from citus_nodes where role = 'foo';
nodename | nodeport | role | active
---------------------------------------------------------------------
(0 rows)
-- try to add a node that is already in the cluster
SELECT * FROM master_add_node('localhost', :worker_1_port);
master_add_node
@@ -126,6 +165,34 @@ SELECT master_get_active_worker_nodes();
(localhost,57637)
(1 row)
-- get active nodes
SELECT * from citus_nodes where active = 't';
nodename | nodeport | role | active
---------------------------------------------------------------------
localhost | 57636 | coordinator | t
localhost | 57637 | worker | t
(2 rows)
-- get inactive nodes
SELECT * from citus_nodes where active = 'f';
nodename | nodeport | role | active
---------------------------------------------------------------------
localhost | 57638 | worker | f
(1 row)
-- make sure non-superusers can access the view
CREATE ROLE normaluser;
SET ROLE normaluser;
SELECT * FROM citus_nodes;
nodename | nodeport | role | active
---------------------------------------------------------------------
localhost | 57636 | coordinator | t
localhost | 57638 | worker | f
localhost | 57637 | worker | t
(3 rows)
SET ROLE postgres;
DROP ROLE normaluser;
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;

View File

@@ -1483,8 +1483,9 @@ SELECT * FROM multi_extension.print_extension_changes();
| function citus_stat_counters(oid) SETOF record
| function citus_stat_counters_reset(oid) void
| function citus_unmark_object_distributed(oid,oid,integer,boolean) void
| view citus_nodes
| view citus_stat_counters
(30 rows)
(31 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version

View File

@@ -380,6 +380,7 @@ ORDER BY 1;
view citus_dist_stat_activity
view citus_lock_waits
view citus_locks
view citus_nodes
view citus_schema.citus_schemas
view citus_schema.citus_tables
view citus_shard_indexes_on_worker
@@ -392,6 +393,6 @@ ORDER BY 1;
view citus_stat_tenants_local
view pg_dist_shard_placement
view time_partitions
(361 rows)
(362 rows)
DROP TABLE extension_basic_types;

View File

@@ -137,18 +137,21 @@ class Message:
class SharedMessage(Message, metaclass=MessageMeta):
    "A message which could be sent by either the frontend or the backend"
    _msgtypes = dict()
    _classes = dict()


class FrontendMessage(Message, metaclass=MessageMeta):
    "A message which will only be sent by a frontend"
    _msgtypes = dict()
    _classes = dict()


class BackendMessage(Message, metaclass=MessageMeta):
    "A message which will only be sent by a backend"
    _msgtypes = dict()
    _classes = dict()

View File

@@ -32,6 +32,21 @@ SELECT result FROM run_command_on_workers('SELECT citus_is_primary_node()');
-- get the active nodes
SELECT master_get_active_worker_nodes();
-- get all nodes
SELECT * from citus_nodes;
-- get active nodes
SELECT * from citus_nodes where active = 't';
-- get coordinator nodes
SELECT * from citus_nodes where role = 'coordinator';
-- get worker nodes
SELECT * from citus_nodes where role = 'worker';
-- get nodes with unknown role
SELECT * from citus_nodes where role = 'foo';
-- try to add a node that is already in the cluster
SELECT * FROM master_add_node('localhost', :worker_1_port);
@@ -51,6 +66,20 @@ SELECT citus_disable_node('localhost', :worker_2_port);
SELECT public.wait_until_metadata_sync(20000);
SELECT master_get_active_worker_nodes();
-- get active nodes
SELECT * from citus_nodes where active = 't';
-- get inactive nodes
SELECT * from citus_nodes where active = 'f';
-- make sure non-superusers can access the view
CREATE ROLE normaluser;
SET ROLE normaluser;
SELECT * FROM citus_nodes;
SET ROLE postgres;
DROP ROLE normaluser;
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;