mirror of https://github.com/citusdata/citus.git
Merge pull request #4779 from citusdata/typos
Fix various typos due to zealous repetition
commit ce296ac62e
@@ -272,7 +272,7 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values,
 	/*
 	 * Declare local params for readability;
 	 *
-	 * assignment is done directly to not loose the pointers if any of the later
+	 * assignment is done directly to not lose the pointers if any of the later
	 * allocations cause an error. FreeConnParamsHashEntryFields knows about the
	 * possibility of half initialized keywords or values and correctly reclaims them when
	 * the cache is reused.
@@ -370,7 +370,7 @@ StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port,
		 * remote node as it might not have any space in
		 * max_connections for this connection establishment.
		 *
-		 * Still, we keep track of the connnection counter.
+		 * Still, we keep track of the connection counter.
		 */
		IncrementSharedConnectionCounter(hostname, port);
	}
@@ -219,7 +219,7 @@ ClearResultsIfReady(MultiConnection *connection)
		if (!(resultStatus == PGRES_SINGLE_TUPLE || resultStatus == PGRES_TUPLES_OK ||
			  resultStatus == PGRES_COMMAND_OK))
		{
-			/* an error occcurred just when we were aborting */
+			/* an error occurred just when we were aborting */
			return false;
		}

@@ -15,7 +15,7 @@
 *    executes "unassigned" tasks from a queue.
 *  - WorkerSession:
 *    Connection to a worker that is used to execute "assigned" tasks
- *    from a queue and may execute unasssigned tasks from the WorkerPool.
+ *    from a queue and may execute unassigned tasks from the WorkerPool.
 *  - ShardCommandExecution:
 *    Execution of a Task across a list of placements.
 *  - TaskPlacementExecution:
@@ -268,7 +268,7 @@ RemoteFileDestReceiverStartup(DestReceiver *dest, int operation,

 /*
  * PrepareIntermediateResultBroadcast gets a RemoteFileDestReceiver and does
- * the necessary initilizations including initiating the remote connnections
+ * the necessary initilizations including initiating the remote connections
  * and creating the local file, which is necessary (it might be both).
  */
 static void
@@ -101,7 +101,7 @@ typedef struct DependencyDefinition
		 * address is used for dependencies that are artificially added during the
		 * chasing. Since they are added by citus code we assume the dependency needs to
		 * be chased anyway, ofcourse it will only actually be chased if the object is a
-		 * suppported object by citus
+		 * supported object by citus
		 */
		ObjectAddress address;
	} data;
@@ -40,7 +40,7 @@ static void ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray,
						StringInfo *commandStringArray,
						bool *statusArray,
						StringInfo *resultStringArray,
-						int commmandCount);
+						int commandCount);
 static bool GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus,
						StringInfo queryResultString);
 static bool EvaluateQueryResult(MultiConnection *connection, PGresult *queryResult,

@@ -51,7 +51,7 @@ static void ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray,
						StringInfo *commandStringArray,
						bool *statusArray,
						StringInfo *resultStringArray,
-						int commmandCount);
+						int commandCount);
 static bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort,
						char *queryString, StringInfo queryResult);
 static Tuplestorestate * CreateTupleStore(TupleDesc tupleDescriptor,
@@ -216,14 +216,14 @@ static void
 ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray,
						StringInfo *commandStringArray,
						bool *statusArray, StringInfo *resultStringArray,
-						int commmandCount)
+						int commandCount)
 {
	MultiConnection **connectionArray =
-		palloc0(commmandCount * sizeof(MultiConnection *));
+		palloc0(commandCount * sizeof(MultiConnection *));
	int finishedCount = 0;

	/* start connections asynchronously */
-	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commandCount; commandIndex++)
	{
		char *nodeName = nodeNameArray[commandIndex]->data;
		int nodePort = nodePortArray[commandIndex];
@@ -233,7 +233,7 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
	}

	/* establish connections */
-	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commandCount; commandIndex++)
	{
		MultiConnection *connection = connectionArray[commandIndex];
		StringInfo queryResultString = resultStringArray[commandIndex];

@@ -257,7 +257,7 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
	}

	/* send queries at once */
-	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commandCount; commandIndex++)
	{
		MultiConnection *connection = connectionArray[commandIndex];
		char *queryString = commandStringArray[commandIndex]->data;
@@ -284,9 +284,9 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor
	}

	/* check for query results */
-	while (finishedCount < commmandCount)
+	while (finishedCount < commandCount)
	{
-		for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+		for (int commandIndex = 0; commandIndex < commandCount; commandIndex++)
		{
			MultiConnection *connection = connectionArray[commandIndex];
			StringInfo queryResultString = resultStringArray[commandIndex];

@@ -311,7 +311,7 @@ ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePor

		CHECK_FOR_INTERRUPTS();

-		if (finishedCount < commmandCount)
+		if (finishedCount < commandCount)
		{
			long sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L;
			pg_usleep(sleepIntervalPerCycle);
@@ -468,9 +468,9 @@ StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString)
 static void
 ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray,
				StringInfo *commandStringArray, bool *statusArray,
-				StringInfo *resultStringArray, int commmandCount)
+				StringInfo *resultStringArray, int commandCount)
 {
-	for (int commandIndex = 0; commandIndex < commmandCount; commandIndex++)
+	for (int commandIndex = 0; commandIndex < commandCount; commandIndex++)
	{
		char *nodeName = nodeNameArray[commandIndex]->data;
		int32 nodePort = nodePortArray[commandIndex];
@@ -331,7 +331,7 @@ CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes)
			/*
			 * For dropped columns, we generate a dummy null column because
			 * varattno in relation and subquery are different things, however if
-			 * we put the NULL columns to the subquery for the droppped columns,
+			 * we put the NULL columns to the subquery for the dropped columns,
			 * they will point to the same variable.
			 */
			TargetEntry *nullTargetEntry = CreateUnusedTargetEntry(resNo);
@@ -64,7 +64,7 @@
 *  - All multi-shard modifications (DDLs, COPY, UPDATE, DELETE, INSERT .. SELECT)
 *  - All multi-shard queries with CTEs (modifying CTEs, read-only CTEs)
 *  - All recursively planned subqueries
- *  - All queries within transaction blocks (BEGIN; query; COMMMIT;)
+ *  - All queries within transaction blocks (BEGIN; query; COMMIT;)
 *
 * In other words, the following types of queries won't be observed in these
 * views:
@@ -6,7 +6,7 @@
 * are intended to track the relation accesses within a transaction. The
 * logic here is mostly useful when a reference table is referred by
 * a distributed table via a foreign key. Whenever such a pair of tables
- * are acccesed inside a transaction, Citus should detect and act
+ * are accessed inside a transaction, Citus should detect and act
 * accordingly.
 *
 * Copyright (c) Citus Data, Inc.

@@ -798,7 +798,7 @@ CheckConflictingRelationAccesses(Oid relationId, ShardPlacementAccessType access
		/*
		 * Switching to sequential mode is admittedly confusing and, could be useless
		 * and less performant in some cases. However, if we do not switch to
-		 * sequential mode at this point, we'd loose the opportunity to do so
+		 * sequential mode at this point, we'd lose the opportunity to do so
		 * later when a parallel query is executed on the hash distributed relations
		 * that are referencing this reference table.
		 */
@@ -3,7 +3,7 @@
 * worker_merge_protocol.c
 *
 * Routines for merging partitioned files into a single file or table. Merging
- * files is one of the threee distributed execution primitives that we apply on
+ * files is one of the three distributed execution primitives that we apply on
 * worker nodes.
 *
 * Copyright (c) Citus Data, Inc.
@@ -17,10 +17,10 @@ BEGIN
 END;
 $$LANGUAGE plpgsql;
 -- Create a function to ignore worker plans in explain output
-CREATE OR REPLACE FUNCTION coordinator_plan(explain_commmand text, out query_plan text)
+CREATE OR REPLACE FUNCTION coordinator_plan(explain_command text, out query_plan text)
 RETURNS SETOF TEXT AS $$
 BEGIN
-  FOR query_plan IN execute explain_commmand LOOP
+  FOR query_plan IN execute explain_command LOOP
     RETURN next;
     IF query_plan LIKE '%Task Count:%'
     THEN

@@ -30,12 +30,12 @@ BEGIN
   RETURN;
 END; $$ language plpgsql;
 -- helper function that returns true if output of given explain has "is not null" (case in-sensitive)
-CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_commmand text)
+CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_command text)
 RETURNS BOOLEAN AS $$
 DECLARE
   query_plan text;
 BEGIN
-  FOR query_plan IN EXECUTE explain_commmand LOOP
+  FOR query_plan IN EXECUTE explain_command LOOP
     IF query_plan ILIKE '%is not null%'
     THEN
       RETURN true;

@@ -44,12 +44,12 @@ BEGIN
   RETURN false;
 END; $$ language plpgsql;
 -- helper function that returns true if output of given explain has "is not null" (case in-sensitive)
-CREATE OR REPLACE FUNCTION explain_has_distributed_subplan(explain_commmand text)
+CREATE OR REPLACE FUNCTION explain_has_distributed_subplan(explain_command text)
 RETURNS BOOLEAN AS $$
 DECLARE
   query_plan text;
 BEGIN
-  FOR query_plan IN EXECUTE explain_commmand LOOP
+  FOR query_plan IN EXECUTE explain_command LOOP
     IF query_plan ILIKE '%Distributed Subplan %_%'
     THEN
       RETURN true;
@@ -20,10 +20,10 @@ END;
 $$LANGUAGE plpgsql;

 -- Create a function to ignore worker plans in explain output
-CREATE OR REPLACE FUNCTION coordinator_plan(explain_commmand text, out query_plan text)
+CREATE OR REPLACE FUNCTION coordinator_plan(explain_command text, out query_plan text)
 RETURNS SETOF TEXT AS $$
 BEGIN
-  FOR query_plan IN execute explain_commmand LOOP
+  FOR query_plan IN execute explain_command LOOP
     RETURN next;
     IF query_plan LIKE '%Task Count:%'
     THEN

@@ -34,12 +34,12 @@ BEGIN
 END; $$ language plpgsql;

 -- helper function that returns true if output of given explain has "is not null" (case in-sensitive)
-CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_commmand text)
+CREATE OR REPLACE FUNCTION explain_has_is_not_null(explain_command text)
 RETURNS BOOLEAN AS $$
 DECLARE
   query_plan text;
 BEGIN
-  FOR query_plan IN EXECUTE explain_commmand LOOP
+  FOR query_plan IN EXECUTE explain_command LOOP
     IF query_plan ILIKE '%is not null%'
     THEN
       RETURN true;

@@ -49,12 +49,12 @@ BEGIN
 END; $$ language plpgsql;

 -- helper function that returns true if output of given explain has "is not null" (case in-sensitive)
-CREATE OR REPLACE FUNCTION explain_has_distributed_subplan(explain_commmand text)
+CREATE OR REPLACE FUNCTION explain_has_distributed_subplan(explain_command text)
 RETURNS BOOLEAN AS $$
 DECLARE
   query_plan text;
 BEGIN
-  FOR query_plan IN EXECUTE explain_commmand LOOP
+  FOR query_plan IN EXECUTE explain_command LOOP
     IF query_plan ILIKE '%Distributed Subplan %_%'
     THEN
       RETURN true;
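
For context, the coordinator_plan and explain_has_* functions renamed in the two test-helper files above are regression-test utilities that EXECUTE an EXPLAIN statement passed in as text: coordinator_plan returns the plan lines and stops at the "Task Count:" line so worker plans are ignored, and the explain_has_* helpers report whether a pattern appears in any plan line. A minimal usage sketch (the EXPLAIN statements and the table name some_distributed_table are illustrative assumptions, not part of this commit):

  -- show only the coordinator part of the plan, hiding per-worker plans
  SELECT coordinator_plan($q$
    EXPLAIN (COSTS FALSE)
    SELECT count(*) FROM some_distributed_table
  $q$);

  -- check whether any plan line contains "is not null" (case-insensitive)
  SELECT explain_has_is_not_null($q$
    EXPLAIN SELECT * FROM some_distributed_table WHERE a IS NOT NULL
  $q$);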