diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c
index edd72e71f..d133c50b2 100644
--- a/src/backend/distributed/metadata/node_metadata.c
+++ b/src/backend/distributed/metadata/node_metadata.c
@@ -2232,7 +2232,7 @@ SetWorkerColumnOptional(WorkerNode *workerNode, int columnIndex, Datum value)
     {
         /* metadata out of sync, mark the worker as not synced */
         ereport(WARNING, (errmsg("Updating the metadata of the node %s:%d "
-                                 "is failed on node %s:%d."
+                                 "is failed on node %s:%d. "
                                  "Metadata on %s:%d is marked as out of sync.",
                                  workerNode->workerName, workerNode->workerPort,
                                  worker->workerName, worker->workerPort,
diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c
index 9e777f2a1..4c20c0433 100644
--- a/src/backend/distributed/operations/shard_transfer.c
+++ b/src/backend/distributed/operations/shard_transfer.c
@@ -553,7 +553,7 @@ CheckSpaceConstraints(MultiConnection *connection, uint64 colocationSizeInBytes)
     {
         ereport(ERROR, (errmsg("not enough empty space on node if the shard is moved, "
                                "actual available space after move will be %ld bytes, "
-                               "desired available space after move is %ld bytes,"
+                               "desired available space after move is %ld bytes, "
                                "estimated size increase on node after move is %ld bytes.",
                                diskAvailableInBytesAfterShardMove,
                                desiredNewDiskAvailableInBytes, colocationSizeInBytes),
diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c
index 051c88735..0157bb545 100644
--- a/src/backend/distributed/replication/multi_logical_replication.c
+++ b/src/backend/distributed/replication/multi_logical_replication.c
@@ -604,7 +604,7 @@ CreateReplicaIdentitiesOnNode(List *shardList, char *nodeName, int32 nodePort)
 
         if (commandList != NIL)
         {
-            ereport(DEBUG1, (errmsg("Creating replica identity for shard %ld on"
+            ereport(DEBUG1, (errmsg("Creating replica identity for shard %ld on "
                                     "target node %s:%d", shardId, nodeName, nodePort)));
 
             SendCommandListToWorkerOutsideTransaction(nodeName, nodePort,
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index 49eaf3063..c15268056 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -1054,8 +1054,8 @@ RegisterCitusConfigVariables(void)
         gettext_noop(
             "Sets how many percentage of free disk space should be after a shard move"),
         gettext_noop(
-            "This setting controls how much free space should be available after a shard move."
-            "If the free disk space will be lower than this parameter, then shard move will result in"
+            "This setting controls how much free space should be available after a shard move. "
+            "If the free disk space will be lower than this parameter, then shard move will result in "
             "an error."),
         &DesiredPercentFreeAfterMove,
         10.0, 0.0, 100.0,
@@ -1448,7 +1448,7 @@ RegisterCitusConfigVariables(void)
                      "parallelization"),
         gettext_noop("When enabled, Citus will force the executor to use "
                      "as many connections as possible while executing a "
-                     "parallel distributed query. If not enabled, the executor"
+                     "parallel distributed query. If not enabled, the executor "
                      "might choose to use less connections to optimize overall "
                      "query execution throughput. Internally, setting this true "
                      "will end up with using one connection per task."),
@@ -1487,7 +1487,7 @@ RegisterCitusConfigVariables(void)
     DefineCustomBoolVariable(
         "citus.hide_citus_dependent_objects",
         gettext_noop(
-            "Hides some objects, which depends on citus extension, from pg meta class queries."
+            "Hides some objects, which depends on citus extension, from pg meta class queries. "
             "It is intended to be used only before postgres vanilla tests to not break them."),
         NULL,
         &HideCitusDependentObjects,
@@ -1594,10 +1594,10 @@ RegisterCitusConfigVariables(void)
         gettext_noop("defines the behaviour when a distributed table "
                      "is joined with a local table"),
         gettext_noop(
-            "There are 4 values available. The default, 'auto' will recursively plan"
-            "distributed tables if there is a constant filter on a unique index."
-            "'prefer-local' will choose local tables if possible."
-            "'prefer-distributed' will choose distributed tables if possible"
+            "There are 4 values available. The default, 'auto' will recursively plan "
+            "distributed tables if there is a constant filter on a unique index. "
+            "'prefer-local' will choose local tables if possible. "
+            "'prefer-distributed' will choose distributed tables if possible. "
            "'never' will basically skip local table joins."
         ),
         &LocalTableJoinPolicy,
@@ -1816,11 +1816,11 @@ RegisterCitusConfigVariables(void)
                      "health status are tracked in a shared hash table on "
                      "the master node. This configuration value limits the "
                      "size of the hash table, and consequently the maximum "
-                     "number of worker nodes that can be tracked."
+                     "number of worker nodes that can be tracked. "
                      "Citus keeps some information about the worker nodes "
                      "in the shared memory for certain optimizations. The "
                      "optimizations are enforced up to this number of worker "
-                     "nodes. Any additional worker nodes may not benefit from"
+                     "nodes. Any additional worker nodes may not benefit from "
                      "the optimizations."),
         &MaxWorkerNodesTracked,
         2048, 1024, INT_MAX,
@@ -1994,7 +1994,7 @@ RegisterCitusConfigVariables(void)
         gettext_noop("When enabled, the executor waits until all the connections "
                      "are successfully established."),
         gettext_noop("Under some load, the executor may decide to establish some "
-                     "extra connections to further parallelize the execution. However,"
+                     "extra connections to further parallelize the execution. However, "
                      "before the connection establishment is done, the execution might "
                      "have already finished. When this GUC is set to true, the execution "
                      "waits for such connections to be established."),
@@ -2092,7 +2092,7 @@ RegisterCitusConfigVariables(void)
         "citus.replication_model",
         gettext_noop("Deprecated. Please use citus.shard_replication_factor instead"),
         gettext_noop(
-            "Shard replication model is determined by the shard replication factor."
+            "Shard replication model is determined by the shard replication factor. "
             "'statement' replication is used only when the replication factor is one."),
         &ReplicationModel,
         REPLICATION_MODEL_STREAMING,
@@ -2178,7 +2178,7 @@ RegisterCitusConfigVariables(void)
         "citus.skip_advisory_lock_permission_checks",
         gettext_noop("Postgres would normally enforce some "
                      "ownership checks while acquiring locks. "
-                     "When this setting is 'on', Citus skips"
+                     "When this setting is 'on', Citus skips "
                      "ownership checks on internal advisory "
                      "locks."),
         NULL,
@@ -2225,7 +2225,7 @@ RegisterCitusConfigVariables(void)
         gettext_noop("This feature is not intended for users. It is developed "
                      "to get consistent regression test outputs. When enabled, "
                      "the RETURNING clause returns the tuples sorted. The sort "
-                     "is done for all the entries, starting from the first one."
+                     "is done for all the entries, starting from the first one. "
                      "Finally, the sorting is done in ASC order."),
         &SortReturning,
         false,
@@ -2283,7 +2283,7 @@ RegisterCitusConfigVariables(void)
                      "It means that the queries are likely to return wrong results "
                      "unless the user is absolutely sure that pushing down the "
                      "subquery is safe. This GUC is maintained only for backward "
-                     "compatibility, no new users are supposed to use it. The planner"
+                     "compatibility, no new users are supposed to use it. The planner "
                      "is capable of pushing down as much computation as possible to the "
                      "shards depending on the query."),
         &SubqueryPushdown,
diff --git a/src/backend/distributed/utils/background_jobs.c b/src/backend/distributed/utils/background_jobs.c
index 45f7a842f..2032b7e65 100644
--- a/src/backend/distributed/utils/background_jobs.c
+++ b/src/backend/distributed/utils/background_jobs.c
@@ -543,7 +543,7 @@ CheckAndResetLastWorkerAllocationFailure(
                             GetCurrentTimestamp(), &secs, &microsecs);
 
         ereport(LOG, (errmsg(
-                          "able to start a background worker with %ld seconds"
+                          "able to start a background worker with %ld seconds "
                           "delay", secs)));
 
         queueMonitorExecutionContext->backgroundWorkerFailedStartTime = 0;
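
The patch is mechanical: every hunk only adds a trailing space inside one fragment of a multi-line string literal. C concatenates adjacent string literals verbatim at compile time, so a fragment that ends without a space runs straight into the first word of the next fragment and the user sees text such as "... The planneris capable ...". A minimal standalone sketch of the effect, reusing the message text from the first hunk (the variable names and the demo itself are illustrative, not part of the patch):

#include <stdio.h>

int
main(void)
{
    /* missing trailing space: the literals fuse into "...%s:%d.Metadata on..." */
    const char *broken = "is failed on node %s:%d."
                         "Metadata on %s:%d is marked as out of sync.";

    /* trailing space added, as in the patch: "...%s:%d. Metadata on..." */
    const char *fixed = "is failed on node %s:%d. "
                        "Metadata on %s:%d is marked as out of sync.";

    /* the %s/%d placeholders are data here, not a format, so they print literally */
    printf("broken: %s\n", broken);
    printf("fixed:  %s\n", fixed);

    return 0;
}

The same rule applies to the errmsg() and gettext_noop() fragments touched above: the logger and the translation catalog only ever see the single concatenated string, so every fragment except the last one needs its own trailing space.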