mirror of https://github.com/citusdata/citus.git
Fix typos:

    VAR_SET_VALUE_KIND -> VAR_SET_VALUE kind
    beginnig -> beginning
    plannig -> planning
    the the -> the
    er then -> er than

pull/3227/head

parent 4b0ac4b0dd
commit 261a9de42d
@@ -231,7 +231,7 @@ ReplicateAllDependenciesToNode(const char *nodeName, int nodePort)
 
 /*
  * When dependency lists are getting longer we see a delay in the creation time on the
- * workers. We would like to inform the user. Currently we warn for lists greater then
+ * workers. We would like to inform the user. Currently we warn for lists greater than
  * 100 items, where 100 is an arbitrarily chosen number. If we find it too high or too
  * low we can adjust this based on experience.
  */
@@ -522,7 +522,7 @@ PlanAlterExtensionUpdateStmt(AlterExtensionStmt *alterExtensionStmt, const
  * EnsureSequentialModeForExtensionDDL makes sure that the current transaction is already in
  * sequential mode, or can still safely be put in sequential mode, it errors if that is
  * not possible. The error contains information for the user to retry the transaction with
- * sequential mode set from the beginnig.
+ * sequential mode set from the beginning.
  *
  * As extensions are node scoped objects there exists only 1 instance of the
  * extension used by potentially multiple shards. To make sure all shards in
@@ -989,7 +989,7 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace)
  * EnsureSequentialModeForFunctionDDL makes sure that the current transaction is already in
  * sequential mode, or can still safely be put in sequential mode, it errors if that is
  * not possible. The error contains information for the user to retry the transaction with
- * sequential mode set from the beginnig.
+ * sequential mode set from the beginning.
  *
  * As functions are node scoped objects there exists only 1 instance of the function used by
  * potentially multiple shards. To make sure all shards in the transaction can interact
@@ -368,7 +368,7 @@ PlanAlterEnumStmt(AlterEnumStmt *stmt, const char *queryString)
 /*
  * ProcessAlterEnumStmt is called after the AlterEnumStmt has been applied locally.
  *
- * This function is used for ALTER ENUM ... ADD VALUE for postgres versions lower then 12
+ * This function is used for ALTER ENUM ... ADD VALUE for postgres versions lower than 12
  * to distribute the call. Before pg12 these statements could not be called in a
  * transaction. If we would plan the distirbution of these statements the same as we do
  * with the other statements they would get executed in a transaction to perform 2PC, that
@@ -695,7 +695,7 @@ multi_ProcessUtility(PlannedStmt *pstmt,
  * CommandCounterIncrement twice, as the call is a no-op if the command id is not
  * used yet.
  *
- * Once versions older then above are not deemed important anymore this patch can
+ * Once versions older than above are not deemed important anymore this patch can
  * be remove from citus.
  */
 CommandCounterIncrement();
@@ -315,7 +315,7 @@ AppendDefElemSet(StringInfo buf, DefElem *def)
 
 
 /*
- * AppendVarSetValue deparses a VariableSetStmt with VAR_SET_VALUE_KIND
+ * AppendVarSetValue deparses a VariableSetStmt with VAR_SET_VALUE kind.
  * It takes from flatten_set_variable_args in postgres's utils/misc/guc.c,
  * however flatten_set_variable_args does not apply correct quoting.
  */
@@ -2771,7 +2771,7 @@ UpdateConnectionWaitFlags(WorkerSession *session, int waitFlags)
 
 
 /*
- * CheckConnectionReady returns true if the the connection is ready to
+ * CheckConnectionReady returns true if the connection is ready to
  * read or write, or false if it still has bytes to send/receive.
  */
 static bool
@@ -1620,7 +1620,7 @@ CheckAvailableVersion(int elevel)
 
 
 /*
- * CheckInstalledVersion compares CITUS_EXTENSIONVERSION and the the
+ * CheckInstalledVersion compares CITUS_EXTENSIONVERSION and the
  * extension's current version from the pg_extemsion catalog table. If they
  * are not compatible, this function logs an error with the specified elevel,
  * otherwise it returns true.
@@ -1707,7 +1707,7 @@ CreateAndPushPlannerRestrictionContext(void)
 
 
 /*
- * CurrentRestrictionContext returns the the most recently added
+ * CurrentRestrictionContext returns the most recently added
  * PlannerRestrictionContext from the plannerRestrictionContextList list.
  */
 static PlannerRestrictionContext *
@@ -366,7 +366,7 @@ ExplainTaskList(List *taskList, ExplainState *es)
 
 
 /*
- * RemoteExplain fetches the the remote EXPLAIN output for a single
+ * RemoteExplain fetches the remote EXPLAIN output for a single
  * task. It tries each shard placement until one succeeds or all
  * failed.
  */
@@ -2322,7 +2322,7 @@ ProcessHavingClauseForWorkerQuery(Node *originalHavingQual,
 /*
  * PrcoessDistinctClauseForWorkerQuery gets the inputs and modifies the outputs
  * such that worker query's DISTINCT and DISTINCT ON clauses are set accordingly.
- * Note the the function may or may not decide to pushdown the DISTINCT and DISTINCT
+ * Note the function may or may not decide to pushdown the DISTINCT and DISTINCT
  * on clauses based on the inputs.
  *
  * See the detailed comments in the function for the rules of pushing down DISTINCT
@@ -90,7 +90,7 @@ typedef struct InsertValues
 
 
 /*
- * A ModifyRoute encapsulates the the information needed to route modifications
+ * A ModifyRoute encapsulates the information needed to route modifications
  * to the appropriate shard. For a single-shard modification, only one route
  * is needed, but in the case of e.g. a multi-row INSERT, lists of these values
  * will help divide the rows by their destination shards, permitting later
@@ -2429,7 +2429,7 @@ WorkersContainingAllShards(List *prunedShardIntervalsList)
 
 /*
  * Bail out if placement list becomes empty. This means there is no worker
- * containing all shards referecend by the query, hence we can not forward
+ * containing all shards referenced by the query, hence we can not forward
  * this query directly to any worker.
  */
 if (currentPlacementList == NIL)
@@ -462,7 +462,7 @@ PrunableExpressions(Node *node, ClauseWalkerContext *context)
  * be pending.
  *
  * We copy the partial PruningInstance, and continue adding information by
- * calling PrunableExpressionsWalker() on the copy, continuing at the the
+ * calling PrunableExpressionsWalker() on the copy, continuing at the
  * node stored in PendingPruningInstance->continueAt.
  */
 while (context->pendingInstances != NIL)
@@ -255,7 +255,7 @@ StartRemoteTransactionCommit(MultiConnection *connection)
 /*
  * For a moment there I thought we were in trouble.
  *
- * Failing in this state means that we don't know whether the the
+ * Failing in this state means that we don't know whether the
  * commit has succeeded.
  */
 HandleRemoteTransactionConnectionError(connection, raiseErrors);
@@ -288,7 +288,7 @@ FinishRemoteTransactionCommit(MultiConnection *connection)
 
 /*
  * Failing in this state means that we will often not know whether
- * the the commit has succeeded (particularly in case of network
+ * the commit has succeeded (particularly in case of network
  * troubles).
  *
  * XXX: It might be worthwhile to discern cases where we got a
@@ -65,7 +65,7 @@ typedef enum AdvisoryLocktagClass
 ADV_LOCKTAG_CLASS_CITUS_JOB)
 
 /* reuse advisory lock, but with different, unused field 4 (7)
- * Also it has the the database hardcoded to MyDatabaseId, to ensure the locks
+ * Also it has the database hardcoded to MyDatabaseId, to ensure the locks
 * are local to each database */
 #define SET_LOCKTAG_REBALANCE_COLOCATION(tag, colocationOrTableId) \
 SET_LOCKTAG_ADVISORY(tag, \
@@ -1986,7 +1986,7 @@ FROM
 DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) OPERATOR(pg_catalog.>=) '-2147483648'::integer) AND (worker_hash(store_id) OPERATOR(pg_catalog.<=) '-1'::integer))
 DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) OPERATOR(pg_catalog.>=) 0) AND (worker_hash(store_id) OPERATOR(pg_catalog.<=) 2147483647))
 DEBUG: Plan is router executable
--- some of the the ultimate queries where we have constants,
+-- some of the ultimate queries where we have constants,
 -- defaults and group by entry is not on the target entry
 INSERT INTO table_with_defaults (default_2, store_id, first_name)
 SELECT
@@ -25,7 +25,7 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
 -- We use the l_linenumber field for the following aggregations. We need to use
 -- an integer type, as aggregations on numerics or big integers return numerics
 -- of unknown length. When the numerics are read into our temporary table, they
--- trigger the the creation of toasted tables and indexes. This in turn prints
+-- trigger the creation of toasted tables and indexes. This in turn prints
 -- non-deterministic debug messages. To avoid this chain, we use l_linenumber.
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
 sum | avg
@@ -765,7 +765,7 @@ DEBUG: Plan 80 query after replacing subqueries and CTEs: SELECT a.user_id, foo
 t
 (1 row)
 
--- this time the the plan is optimial, we are
+-- this time the plan is optimial, we are
 -- able to keep the UNION query given that foo
 -- is the anchor
 SELECT true AS valid FROM explain_json_2($$
@@ -943,7 +943,7 @@ DEBUG: skipping recursive planning for the subquery since it contains reference
 DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
 DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
 ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
--- similar to the above, make sure that we skip recursive plannig when
+-- similar to the above, make sure that we skip recursive planning when
 -- the subquery doesn't have any tables
 SELECT true AS valid FROM explain_json_2($$
 SELECT *
@@ -962,7 +962,7 @@ DEBUG: skipping recursive planning for the subquery since it contains reference
 DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
 DEBUG: skipping recursive planning for the subquery since it contains references to outer queries
 ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator
--- similar to the above, make sure that we skip recursive plannig when
+-- similar to the above, make sure that we skip recursive planning when
 -- the subquery contains only intermediate results
 SELECT *
 FROM
@@ -24,7 +24,7 @@ SET search_path TO public;
 
 -- These copies were intended to test copying data to single sharded table from
 -- worker nodes, yet in order to remove broadcast logic related codes we change
--- the the table to reference table and copy data from master. Should be updated
+-- the table to reference table and copy data from master. Should be updated
 -- when worker nodes gain capability to run dml commands on reference tables.
 \c - - - :master_port
 SET search_path TO public;
@@ -18,7 +18,7 @@ SET search_path TO public;
 \COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
 -- These copies were intended to test copying data to single sharded table from
 -- worker nodes, yet in order to remove broadcast logic related codes we change
--- the the table to reference table and copy data from master. Should be updated
+-- the table to reference table and copy data from master. Should be updated
 -- when worker nodes gain capability to run dml commands on reference tables.
 \c - - - :master_port
 SET search_path TO public;
@@ -400,7 +400,7 @@ permutation "s1-begin" "s2-begin" "s1-insert-dist-10" "s2-insert-local-10" "s2-i
 # daedlock with reference tables only
 permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-insert-ref-11" "s2-insert-ref-11" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit"
 
-# deadlock with referecen + distributed tables
+# deadlock with reference + distributed tables
 permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-update-1" "deadlock-checker-call" "s2-update-1" "s1-insert-ref-10" "deadlock-checker-call" "s1-commit" "s2-commit"
 
 # slightly more complex case, loop with three nodes
@@ -1598,7 +1598,7 @@ SELECT
 FROM
 table_with_defaults;
 
--- some of the the ultimate queries where we have constants,
+-- some of the ultimate queries where we have constants,
 -- defaults and group by entry is not on the target entry
 INSERT INTO table_with_defaults (default_2, store_id, first_name)
 SELECT
@@ -14,7 +14,7 @@ SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 903
 -- We use the l_linenumber field for the following aggregations. We need to use
 -- an integer type, as aggregations on numerics or big integers return numerics
 -- of unknown length. When the numerics are read into our temporary table, they
--- trigger the the creation of toasted tables and indexes. This in turn prints
+-- trigger the creation of toasted tables and indexes. This in turn prints
 -- non-deterministic debug messages. To avoid this chain, we use l_linenumber.
 
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030;
@@ -578,7 +578,7 @@ SELECT true AS valid FROM explain_json_2($$
 );
 $$);
 
--- this time the the plan is optimial, we are
+-- this time the plan is optimial, we are
 -- able to keep the UNION query given that foo
 -- is the anchor
 SELECT true AS valid FROM explain_json_2($$
@@ -707,7 +707,7 @@ JOIN LATERAL
 WHERE user_id = users_table.user_id) AS bar
 LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE;
 
--- similar to the above, make sure that we skip recursive plannig when
+-- similar to the above, make sure that we skip recursive planning when
 -- the subquery doesn't have any tables
 SELECT true AS valid FROM explain_json_2($$
 SELECT *
@@ -722,7 +722,7 @@ JOIN LATERAL
 LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE
 $$);
 
--- similar to the above, make sure that we skip recursive plannig when
+-- similar to the above, make sure that we skip recursive planning when
 -- the subquery contains only intermediate results
 SELECT *
 FROM