diff --git a/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql b/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql
index 4eefdf60a..4d0dc19a3 100644
--- a/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql
+++ b/src/backend/distributed/sql/citus--13.0-1--13.1-1.sql
@@ -50,3 +50,4 @@ DROP VIEW IF EXISTS pg_catalog.citus_lock_waits;
 #include "udfs/citus_is_primary_node/13.1-1.sql"
 #include "udfs/citus_stat_counters/13.1-1.sql"
 #include "udfs/citus_stat_counters_reset/13.1-1.sql"
+#include "udfs/citus_nodes/13.1-1.sql"
diff --git a/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql b/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql
index 38f195743..8e18cd33b 100644
--- a/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql
+++ b/src/backend/distributed/sql/downgrades/citus--13.1-1--13.0-1.sql
@@ -45,3 +45,4 @@ DROP FUNCTION citus_internal.is_replication_origin_tracking_active();
 DROP VIEW pg_catalog.citus_stat_counters;
 DROP FUNCTION pg_catalog.citus_stat_counters(oid);
 DROP FUNCTION pg_catalog.citus_stat_counters_reset(oid);
+DROP VIEW IF EXISTS pg_catalog.citus_nodes;
diff --git a/src/backend/distributed/sql/udfs/citus_nodes/13.1-1.sql b/src/backend/distributed/sql/udfs/citus_nodes/13.1-1.sql
new file mode 100644
index 000000000..4d129d79e
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_nodes/13.1-1.sql
@@ -0,0 +1,18 @@
+SET search_path = 'pg_catalog';
+DROP VIEW IF EXISTS pg_catalog.citus_nodes;
+
+CREATE OR REPLACE VIEW citus.citus_nodes AS
+SELECT
+    nodename,
+    nodeport,
+    CASE
+        WHEN groupid = 0 THEN 'coordinator'
+        ELSE 'worker'
+    END AS role,
+    isactive AS active
+FROM pg_dist_node;
+
+ALTER VIEW citus.citus_nodes SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_nodes TO PUBLIC;
+
+RESET search_path;
diff --git a/src/backend/distributed/sql/udfs/citus_nodes/latest.sql b/src/backend/distributed/sql/udfs/citus_nodes/latest.sql
new file mode 100644
index 000000000..4d129d79e
--- /dev/null
+++ b/src/backend/distributed/sql/udfs/citus_nodes/latest.sql
@@ -0,0 +1,18 @@
+SET search_path = 'pg_catalog';
+DROP VIEW IF EXISTS pg_catalog.citus_nodes;
+
+CREATE OR REPLACE VIEW citus.citus_nodes AS
+SELECT
+    nodename,
+    nodeport,
+    CASE
+        WHEN groupid = 0 THEN 'coordinator'
+        ELSE 'worker'
+    END AS role,
+    isactive AS active
+FROM pg_dist_node;
+
+ALTER VIEW citus.citus_nodes SET SCHEMA pg_catalog;
+GRANT SELECT ON pg_catalog.citus_nodes TO PUBLIC;
+
+RESET search_path;
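Note: in the view above, role is derived entirely from pg_dist_node.groupid (group 0 is, by convention, the coordinator's group), and the create-in-the-citus-schema-then-ALTER ... SET SCHEMA pg_catalog two-step follows the usual Citus pattern for catalog views, since CREATE VIEW cannot create objects in pg_catalog directly. A minimal sketch of what the view returns on a small cluster; hostnames and ports here are illustrative, matching the regression tests below:

    SELECT nodename, nodeport, role, active FROM pg_catalog.citus_nodes ORDER BY nodeport;
    --  nodename  | nodeport |    role     | active
    -- -----------+----------+-------------+--------
    --  localhost |    57636 | coordinator | t
    --  localhost |    57637 | worker      | t
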
diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out
index e6634d5c6..352a2ddfe 100644
--- a/src/test/regress/expected/multi_cluster_management.out
+++ b/src/test/regress/expected/multi_cluster_management.out
@@ -72,6 +72,45 @@ SELECT master_get_active_worker_nodes();
  (localhost,57637)
 (2 rows)
 
+-- get all nodes
+SELECT * from citus_nodes;
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+ localhost |    57637 | worker      | t
+ localhost |    57638 | worker      | t
+ localhost |    57636 | coordinator | t
+(3 rows)
+
+-- get active nodes
+SELECT * from citus_nodes where active = 't';
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+ localhost |    57637 | worker      | t
+ localhost |    57638 | worker      | t
+ localhost |    57636 | coordinator | t
+(3 rows)
+
+-- get coordinator nodes
+SELECT * from citus_nodes where role = 'coordinator';
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+ localhost |    57636 | coordinator | t
+(1 row)
+
+-- get worker nodes
+SELECT * from citus_nodes where role = 'worker';
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+ localhost |    57637 | worker      | t
+ localhost |    57638 | worker      | t
+(2 rows)
+
+-- get nodes with unknown role
+SELECT * from citus_nodes where role = 'foo';
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+(0 rows)
+
 -- try to add a node that is already in the cluster
 SELECT * FROM master_add_node('localhost', :worker_1_port);
  master_add_node
@@ -126,6 +165,34 @@ SELECT master_get_active_worker_nodes();
  (localhost,57637)
 (1 row)
 
+-- get active nodes
+SELECT * from citus_nodes where active = 't';
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+ localhost |    57636 | coordinator | t
+ localhost |    57637 | worker      | t
+(2 rows)
+
+-- get inactive nodes
+SELECT * from citus_nodes where active = 'f';
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+ localhost |    57638 | worker      | f
+(1 row)
+
+-- make sure non-superusers can access the view
+CREATE ROLE normaluser;
+SET ROLE normaluser;
+SELECT * FROM citus_nodes;
+ nodename  | nodeport |    role     | active
+---------------------------------------------------------------------
+ localhost |    57636 | coordinator | t
+ localhost |    57638 | worker      | f
+ localhost |    57637 | worker      | t
+(3 rows)
+
+SET ROLE postgres;
+DROP ROLE normaluser;
 -- add some shard placements to the cluster
 SET citus.shard_count TO 16;
 SET citus.shard_replication_factor TO 1;
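Note: the normaluser block above exercises the GRANT SELECT ... TO PUBLIC issued in the view script; the expected output relies on nothing beyond that grant. If a more direct assertion were ever wanted, a hypothetical check (not part of this patch) could be:

    SELECT has_table_privilege('normaluser', 'pg_catalog.citus_nodes', 'SELECT');  -- expected: t

has_table_privilege works for views as well as tables.
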
diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out
index 88104a46a..c8468883b 100644
--- a/src/test/regress/expected/multi_extension.out
+++ b/src/test/regress/expected/multi_extension.out
@@ -1483,8 +1483,9 @@ SELECT * FROM multi_extension.print_extension_changes();
  | function citus_stat_counters(oid) SETOF record
  | function citus_stat_counters_reset(oid) void
  | function citus_unmark_object_distributed(oid,oid,integer,boolean) void
+ | view citus_nodes
  | view citus_stat_counters
-(30 rows)
+(31 rows)
 
 DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
 -- show running version
diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out
index 70c71093d..3390dc387 100644
--- a/src/test/regress/expected/upgrade_list_citus_objects.out
+++ b/src/test/regress/expected/upgrade_list_citus_objects.out
@@ -380,6 +380,7 @@ ORDER BY 1;
  view citus_dist_stat_activity
  view citus_lock_waits
  view citus_locks
+ view citus_nodes
  view citus_schema.citus_schemas
  view citus_schema.citus_tables
  view citus_shard_indexes_on_worker
@@ -392,6 +393,6 @@ ORDER BY 1;
  view citus_stat_tenants_local
  view pg_dist_shard_placement
  view time_partitions
-(361 rows)
+(362 rows)
 
 DROP TABLE extension_basic_types;
diff --git a/src/test/regress/mitmscripts/structs.py b/src/test/regress/mitmscripts/structs.py
index 2be3a6a2e..f799662d8 100644
--- a/src/test/regress/mitmscripts/structs.py
+++ b/src/test/regress/mitmscripts/structs.py
@@ -137,18 +137,21 @@ class Message:
 
 
 class SharedMessage(Message, metaclass=MessageMeta):
     "A message which could be sent by either the frontend or the backend"
+    _msgtypes = dict()
     _classes = dict()
 
 
 class FrontendMessage(Message, metaclass=MessageMeta):
     "A message which will only be sent be a backend"
+    _msgtypes = dict()
     _classes = dict()
 
 
 class BackendMessage(Message, metaclass=MessageMeta):
     "A message which will only be sent be a frontend"
+    _msgtypes = dict()
     _classes = dict()
 
 
diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql
index a1e0e9b09..71078adab 100644
--- a/src/test/regress/sql/multi_cluster_management.sql
+++ b/src/test/regress/sql/multi_cluster_management.sql
@@ -32,6 +32,21 @@ SELECT result FROM run_command_on_workers('SELECT citus_is_primary_node()');
 -- get the active nodes
 SELECT master_get_active_worker_nodes();
 
+-- get all nodes
+SELECT * from citus_nodes;
+
+-- get active nodes
+SELECT * from citus_nodes where active = 't';
+
+-- get coordinator nodes
+SELECT * from citus_nodes where role = 'coordinator';
+
+-- get worker nodes
+SELECT * from citus_nodes where role = 'worker';
+
+-- get nodes with unknown role
+SELECT * from citus_nodes where role = 'foo';
+
 -- try to add a node that is already in the cluster
 SELECT * FROM master_add_node('localhost', :worker_1_port);
 
@@ -51,6 +66,20 @@ SELECT citus_disable_node('localhost', :worker_2_port);
 SELECT public.wait_until_metadata_sync(20000);
 SELECT master_get_active_worker_nodes();
 
+-- get active nodes
+SELECT * from citus_nodes where active = 't';
+
+-- get inactive nodes
+SELECT * from citus_nodes where active = 'f';
+
+-- make sure non-superusers can access the view
+CREATE ROLE normaluser;
+SET ROLE normaluser;
+SELECT * FROM citus_nodes;
+
+SET ROLE postgres;
+DROP ROLE normaluser;
+
 -- add some shard placements to the cluster
 SET citus.shard_count TO 16;
 SET citus.shard_replication_factor TO 1;
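Note: since the downgrade script drops the view and the 13.1-1 upgrade script recreates it, a quick manual smoke test of both directions, sketched here under the assumption of a database with the Citus extension installed at version 13.1-1, is:

    ALTER EXTENSION citus UPDATE TO '13.0-1';
    SELECT count(*) FROM pg_views
    WHERE schemaname = 'pg_catalog' AND viewname = 'citus_nodes';   -- expected: 0
    ALTER EXTENSION citus UPDATE TO '13.1-1';
    SELECT count(*) FROM pg_views
    WHERE schemaname = 'pg_catalog' AND viewname = 'citus_nodes';   -- expected: 1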