mirror of https://github.com/citusdata/citus.git

Add a GUC to require coordinator in the metadata

parent d06146360d
commit 64c65b6234
@@ -201,6 +201,9 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
     /* enable citus_add_local_table_to_metadata on an empty node */
     InsertCoordinatorIfClusterEmpty();
 
+    /* make sure the coordinator is in the metadata */
+    EnsureCoordinatorInMetadata();
+
     /*
      * Creating Citus local tables relies on functions that accesses
      * shards locally (e.g., ExecuteAndLogUtilityCommand()). As long as
@@ -218,6 +218,9 @@ create_distributed_table(PG_FUNCTION_ARGS)
     /* enable create_distributed_table on an empty node */
     InsertCoordinatorIfClusterEmpty();
 
+    /* make sure the coordinator is in the metadata */
+    EnsureCoordinatorInMetadata();
+
     /*
      * Lock target relation with an exclusive lock - there's no way to make
      * sense of this table until we've committed, and we don't want multiple
@@ -273,6 +276,9 @@ create_reference_table(PG_FUNCTION_ARGS)
     /* enable create_reference_table on an empty node */
     InsertCoordinatorIfClusterEmpty();
 
+    /* make sure the coordinator is in the metadata */
+    EnsureCoordinatorInMetadata();
+
     /*
      * Lock target relation with an exclusive lock - there's no way to make
      * sense of this table until we've committed, and we don't want multiple
@@ -77,6 +77,13 @@ bool ReplicateReferenceTablesOnActivate = true;
 /* did current transaction modify pg_dist_node? */
 bool TransactionModifiedNodeMetadata = false;
 
+/*
+ * IsCoordinatorInMetadataRequired is a GUC that specifies whether operations
+ * like create_distributed_table should fail if the coordinator is not in the
+ * metadata.
+ */
+bool IsCoordinatorInMetadataRequired = true;
+
 bool EnableMetadataSync = true;
 
 typedef struct NodeMetadata
@@ -2356,6 +2363,34 @@ EnsureCoordinator(void)
 }
 
 
+/*
+ * EnsureCoordinatorInMetadata throws an error if the coordinator is not
+ * in the metadata.
+ */
+void
+EnsureCoordinatorInMetadata(void)
+{
+    if (!IsCoordinatorInMetadataRequired)
+    {
+        return;
+    }
+
+    bool isCoordinatorInMetadata = false;
+
+    PrimaryNodeForGroup(COORDINATOR_GROUP_ID, &isCoordinatorInMetadata);
+
+    if (!isCoordinatorInMetadata)
+    {
+        ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                        errmsg("coordinator is not yet added to the Citus node metadata"),
+                        errdetail("Worker nodes need to be able to connect to the "
+                                  "coordinator to transfer data."),
+                        errhint("Use SELECT citus_set_coordinator_host('<hostname>') "
+                                "to configure the coordinator hostname")));
+    }
+}
+
+
 /*
  * InsertCoordinatorIfClusterEmpty can be used to ensure Citus tables can be
  * created even on a node that has just performed CREATE EXTENSION citus;
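Taken together with the error text above, the new check implies the following user-facing flow on a fresh node. This is a hedged sketch rather than part of the commit: the table name, column, and hostname are illustrative.

    -- With the GUC enabled (the default), creating a Citus table fails
    -- until the coordinator itself is registered in pg_dist_node:
    SET citus.require_coordinator_in_metadata TO on;
    SELECT create_distributed_table('my_table', 'id');
    -- ERROR:  coordinator is not yet added to the Citus node metadata

    -- Registering the coordinator, as the errhint suggests, satisfies
    -- the check on subsequent calls:
    SELECT citus_set_coordinator_host('coordinator.example.com');
    SELECT create_distributed_table('my_table', 'id');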
@@ -1642,6 +1642,21 @@ RegisterCitusConfigVariables(void)
         GUC_NO_SHOW_ALL,
         WarnIfReplicationModelIsSet, NULL, NULL);
 
+    DefineCustomBoolVariable(
+        "citus.require_coordinator_in_metadata",
+        gettext_noop("Sets whether Citus requires that the coordinator is in the "
+                     "metadata"),
+        gettext_noop("Various Citus features depend on the coordinator being in the "
+                     "metadata. By default, we check whether this is the case when "
+                     "creating a Citus table. You can circumvent this check by "
+                     "disabling this setting."),
+        &IsCoordinatorInMetadataRequired,
+        true,
+        PGC_SUSET,
+        GUC_NO_SHOW_ALL,
+        NULL, NULL, NULL);
+
+
     DefineCustomBoolVariable(
         "citus.running_under_isolation_test",
         gettext_noop(
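The registration above makes the GUC superuser-settable (PGC_SUSET) and hides it from SHOW ALL output (GUC_NO_SHOW_ALL). A minimal sketch of circumventing the check, as the long description permits, assuming a superuser session and an illustrative table:

    -- Superuser-only setting; disable the coordinator-in-metadata check:
    SET citus.require_coordinator_in_metadata TO off;
    SELECT create_distributed_table('my_table', 'id');
    RESET citus.require_coordinator_in_metadata;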
@@ -62,6 +62,7 @@ extern int MaxWorkerNodesTracked;
 extern char *WorkerListFileName;
 extern char *CurrentCluster;
 extern bool ReplicateReferenceTablesOnActivate;
+extern bool IsCoordinatorInMetadataRequired;
 
 extern int ActivateNode(char *nodeName, int nodePort);
 
|
@ -89,6 +90,7 @@ extern WorkerNode * FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePor
|
||||||
extern WorkerNode * FindNodeWithNodeId(int nodeId, bool missingOk);
|
extern WorkerNode * FindNodeWithNodeId(int nodeId, bool missingOk);
|
||||||
extern List * ReadDistNode(bool includeNodesFromOtherClusters);
|
extern List * ReadDistNode(bool includeNodesFromOtherClusters);
|
||||||
extern void EnsureCoordinator(void);
|
extern void EnsureCoordinator(void);
|
||||||
|
void EnsureCoordinatorInMetadata(void);
|
||||||
extern void InsertCoordinatorIfClusterEmpty(void);
|
extern void InsertCoordinatorIfClusterEmpty(void);
|
||||||
extern uint32 GroupForNode(char *nodeName, int32 nodePort);
|
extern uint32 GroupForNode(char *nodeName, int32 nodePort);
|
||||||
extern WorkerNode * PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes);
|
extern WorkerNode * PrimaryNodeForGroup(int32 groupId, bool *groupContainsNodes);
|
||||||
|
|
|
@@ -115,6 +115,13 @@ SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
 (1 row)
 
 CREATE TABLE cluster_management_test (col_1 text, col_2 int);
+-- coordinator was not yet added to the metadata, should fail
+SET citus.require_coordinator_in_metadata TO on;
+SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
+ERROR:  coordinator is not yet added to the Citus node metadata
+DETAIL:  Worker nodes need to be able to connect to the coordinator to transfer data.
+HINT:  Use SELECT citus_set_coordinator_host('<hostname>') to configure the coordinator hostname
+RESET citus.require_coordinator_in_metadata;
 SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
  create_distributed_table
 ---------------------------------------------------------------------
@@ -465,6 +465,9 @@ push(@pgOptions, "citus.node_connection_timeout=${connectionTimeout}");
 push(@pgOptions, "citus.explain_analyze_sort_method='taskId'");
 push(@pgOptions, "citus.enable_manual_changes_to_shards=on");
 
+# We currently have too many tests that omit the coordinator from the metadata
+push(@pgOptions, "citus.require_coordinator_in_metadata=off");
+
 # Some tests look at shards in pg_class, make sure we can usually see them:
 push(@pgOptions, "citus.hide_shards_from_app_name_prefixes='psql,pg_dump'");
 
@@ -52,6 +52,12 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1390000;
 SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);
 
 CREATE TABLE cluster_management_test (col_1 text, col_2 int);
+
+-- coordinator was not yet added to the metadata, should fail
+SET citus.require_coordinator_in_metadata TO on;
+SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
+RESET citus.require_coordinator_in_metadata;
+
 SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
 
 -- see that there are some active placements in the candidate node