mirror of https://github.com/citusdata/citus.git

Fix some more master->coordinator comments
parent b4fec63bc0
commit bdfeb380d3
@@ -44,11 +44,3 @@ TODO: to be written by someone with enough knowledge to write how, when and why
 Implemented in `multi_copy.c`
 
 TODO: to be written by someone with enough knowledge to write how, when and why it is used.
-
-## MASTER_HOST host
-
-Implemented in `multi_copy.c`
-
-Triggered by the `MASTER_HOST` option being set on the copy command. Also accepts `MASTER_PORT`
-
-TODO: to be written by someone with enough knowledge to write how, when and why it is used.
@@ -102,7 +102,7 @@ IsIndexRenameStmt(RenameStmt *renameStmt)
  * PreprocessIndexStmt determines whether a given CREATE INDEX statement involves
  * a distributed table. If so (and if the statement does not use unsupported
  * options), it modifies the input statement to ensure proper execution against
- * the master node table and creates a DDLJob to encapsulate information needed
+ * the coordinator node table and creates a DDLJob to encapsulate information needed
  * during the worker node portion of DDL execution before returning that DDLJob
  * in a List. If no distributed table is involved, this function returns NIL.
  */
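The REINDEX and DROP INDEX hunks that follow change the same sentence in near-identical comments, so one illustration covers all three. A minimal standalone sketch of the contract these comments describe; the `DDLJob` struct, `IsCitusTable` check, and OID threshold here are simplified, hypothetical stand-ins rather than the Citus or PostgreSQL definitions:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Simplified stand-in: the real code uses PostgreSQL's Oid and List types
 * and the Citus DDLJob definition; this struct keeps only the bits the
 * comment mentions.
 */
typedef struct DDLJob
{
	unsigned int targetRelationId; /* distributed table the DDL touches */
	const char *commandString;     /* DDL text to run during the worker phase */
} DDLJob;

/* Hypothetical metadata check standing in for Citus' catalog lookup. */
static bool
IsCitusTable(unsigned int relationId)
{
	return relationId >= 16384; /* pretend: user-created tables are distributed */
}

/*
 * Sketch of the contract: return NULL (NIL) when no distributed table is
 * involved, otherwise a DDLJob that the caller would wrap in a List.
 */
static DDLJob *
PreprocessIndexLikeStmt(unsigned int relationId, const char *ddlCommand)
{
	if (!IsCitusTable(relationId))
	{
		return NULL; /* plain local table: PostgreSQL handles it on its own */
	}

	DDLJob *ddlJob = calloc(1, sizeof(DDLJob));
	ddlJob->targetRelationId = relationId;
	ddlJob->commandString = ddlCommand;
	return ddlJob;
}

int
main(void)
{
	DDLJob *job = PreprocessIndexLikeStmt(20001, "CREATE INDEX i ON t (a)");
	printf("%s\n", job ? job->commandString : "no distributed table, NIL returned");
	free(job);
	return 0;
}
```

In Citus itself the returned DDLJob carries the information needed during the worker-node portion of DDL execution, exactly as the comment says; the sketch only shows the branch between "return NIL" and "return a job".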
@@ -194,7 +194,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand)
  * PreprocessReindexStmt determines whether a given REINDEX statement involves
  * a distributed table. If so (and if the statement does not use unsupported
  * options), it modifies the input statement to ensure proper execution against
- * the master node table and creates a DDLJob to encapsulate information needed
+ * the coordinator node table and creates a DDLJob to encapsulate information needed
  * during the worker node portion of DDL execution before returning that DDLJob
  * in a List. If no distributed table is involved, this function returns NIL.
  */
@@ -302,7 +302,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand)
  * PreprocessDropIndexStmt determines whether a given DROP INDEX statement involves
  * a distributed table. If so (and if the statement does not use unsupported
  * options), it modifies the input statement to ensure proper execution against
- * the master node table and creates a DDLJob to encapsulate information needed
+ * the coordinator node table and creates a DDLJob to encapsulate information needed
  * during the worker node portion of DDL execution before returning that DDLJob
  * in a List. If no distributed table is involved, this function returns NIL.
  */
@@ -4498,7 +4498,7 @@ ExtractParametersFromParamList(ParamListInfo paramListInfo,
 
 /*
  * Use 0 for data types where the oid values can be different on
- * the master and worker nodes. Therefore, the worker nodes can
+ * the coordinator and worker nodes. Therefore, the worker nodes can
  * infer the correct oid.
  */
 if (parameterData->ptype >= FirstNormalObjectId && !useOriginalCustomTypeOids)
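Some context for the comment above: PostgreSQL reserves OIDs below `FirstNormalObjectId` (16384, defined in `access/transam.h`) for built-in objects, so only those OIDs are guaranteed to be identical on every node. A self-contained sketch of the decision the shown `if` statement makes, with the constant copied rather than included and a hypothetical helper name:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Matches PostgreSQL's FirstNormalObjectId from access/transam.h: OIDs below
 * this are assigned to built-in objects and match on all nodes; OIDs at or
 * above it are assigned per-cluster and may differ between nodes.
 */
#define FIRST_NORMAL_OBJECT_ID 16384
#define INVALID_OID 0

/*
 * TypeOidToSend mirrors the idea in the comment: built-in type OIDs are safe
 * to ship as-is, while for custom types we send 0 (InvalidOid) so the worker
 * resolves the type itself instead of trusting a coordinator-local OID.
 */
static unsigned int
TypeOidToSend(unsigned int parameterTypeId, bool useOriginalCustomTypeOids)
{
	if (parameterTypeId >= FIRST_NORMAL_OBJECT_ID && !useOriginalCustomTypeOids)
	{
		return INVALID_OID;
	}

	return parameterTypeId;
}

int
main(void)
{
	printf("int4 (23)      -> %u\n", TypeOidToSend(23, false));    /* built-in: 23 */
	printf("custom (70000) -> %u\n", TypeOidToSend(70000, false)); /* custom: 0 */
	return 0;
}
```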
@@ -311,8 +311,8 @@ PartitionColumnInTableList(Var *column, List *tableNodeList)
 
 /*
  * ShouldPullDistinctColumn returns true if distinct aggregate should pull
- * individual columns from worker to master and evaluate aggregate operation
- * at master.
+ * individual columns from worker to coordinator and evaluate aggregate operation
+ * on the coordinator.
  *
  * Pull cases are:
  * - repartition subqueries
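The reason pulling the individual column values matters: when the same value can occur on more than one worker, per-worker `count(DISTINCT ...)` results cannot simply be added up on the coordinator. A small standalone illustration with hypothetical pulled values (no Citus code involved):

```c
#include <stdbool.h>
#include <stdio.h>

/* Count distinct values in a small int array (O(n^2) is fine for a sketch). */
static int
CountDistinct(const int *values, int count)
{
	int distinct = 0;
	for (int i = 0; i < count; i++)
	{
		bool seenBefore = false;
		for (int j = 0; j < i; j++)
		{
			if (values[j] == values[i])
			{
				seenBefore = true;
				break;
			}
		}
		if (!seenBefore)
		{
			distinct++;
		}
	}
	return distinct;
}

int
main(void)
{
	/* Hypothetical column values from two workers; value 3 lives on both. */
	int worker1[] = { 1, 2, 3 };
	int worker2[] = { 3, 4 };
	int pulled[] = { 1, 2, 3, 3, 4 }; /* coordinator sees every individual value */

	int naive = CountDistinct(worker1, 3) + CountDistinct(worker2, 2);
	printf("sum of per-worker distinct counts: %d\n", naive);                    /* 5: over-counts */
	printf("distinct over pulled column values: %d\n", CountDistinct(pulled, 5)); /* 4: correct */
	return 0;
}
```

When the distinct column happens to be the distribution column, values cannot overlap across shards, which is why the pull is only needed in the cases the comment goes on to list.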
@@ -167,7 +167,7 @@ PG_FUNCTION_INFO_V1(worker_save_query_explain_analyze);
 
 /*
  * CitusExplainScan is a custom scan explain callback function which is used to
- * print explain information of a Citus plan which includes both master and
+ * print explain information of a Citus plan which includes both combine query and
  * distributed plan.
  */
 void
@@ -46,7 +46,7 @@ The join order planner is applied to the join tree in the original query and gen
 
 The logical optimizer uses commutativity rules to push project and select operators down below the `MultiCollect` nodes. Everything above the `MultiCollect` operator is executed on the coordinator and everything below on the workers. Additionally, the optimizer uses distributivity rules to push down operators below the `MultiJoin` nodes, such that filters and projections are applied prior to joins. This is primarily relevant for re-partition joins, which first try to reduce the data by applying selections and projections, and then re-partition the result.
 
-A number of SQL clauses like aggregates, GROUP BY, ORDER BY, LIMIT can only be pushed down below the `MultiCollect` under certain conditions. All these clauses are bundled together in a `MultiExtendedOpNode`. After the basic transformation, the `MultiExtendedOpNode`s are directly above the `MultiCollect` nodes. They are then split into a master and a worker part and the worker part is pushed down below the `MultiCollect`.
+A number of SQL clauses like aggregates, GROUP BY, ORDER BY, LIMIT can only be pushed down below the `MultiCollect` under certain conditions. All these clauses are bundled together in a `MultiExtendedOpNode`. After the basic transformation, the `MultiExtendedOpNode`s are directly above the `MultiCollect` nodes. They are then split into a coordinator and a worker part and the worker part is pushed down below the `MultiCollect`.
 
 ### Physical planner
 
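A concrete instance of that coordinator/worker split is `avg(x)`: the worker part of the `MultiExtendedOpNode` computes `sum(x)` and `count(x)` on each worker, and the coordinator part combines them as `sum(sum) / sum(count)`. A standalone sketch of the combine step, using hypothetical partial-state structs rather than the planner's actual node types:

```c
#include <stdio.h>

/*
 * Partial aggregate state produced by the worker part of the extended op
 * node for avg(x): a per-worker sum and row count.
 */
typedef struct WorkerPartial
{
	double sum;
	long count;
} WorkerPartial;

/*
 * Coordinator part: combine the worker partials and finish the average,
 * i.e. sum(sum) / sum(count) rather than an average of per-worker averages.
 */
static double
CombineAverage(const WorkerPartial *partials, int workerCount)
{
	double totalSum = 0.0;
	long totalCount = 0;

	for (int i = 0; i < workerCount; i++)
	{
		totalSum += partials[i].sum;
		totalCount += partials[i].count;
	}

	return totalCount > 0 ? totalSum / totalCount : 0.0;
}

int
main(void)
{
	WorkerPartial partials[] = { { 10.0, 4 }, { 50.0, 1 } };
	/* Correct distributed avg is 60/5 = 12, not (2.5 + 50)/2 = 26.25. */
	printf("avg = %.2f\n", CombineAverage(partials, 2));
	return 0;
}
```

Averaging the per-worker averages would weight small shards as heavily as large ones, which is why the split happens at the sum/count level rather than at the finished aggregate.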
@@ -23,7 +23,7 @@
 #endif
 
 
-/* Function declarations for building local plans on the master node */
+/* Function declarations for building local plans on the coordinator node */
 struct DistributedPlan;
 struct CustomScan;
 extern Path * CreateCitusCustomScanPath(PlannerInfo *root, RelOptInfo *relOptInfo,
@@ -24,7 +24,7 @@
 
 /*
  * In our distributed database, we need a mechanism to make remote procedure
- * calls between clients, the master node, and worker nodes. These remote calls
+ * calls between clients, the coordinator node, and worker nodes. These remote calls
  * require serializing and deserializing values and function signatures between
  * nodes; and for these, we currently use PostgreSQL's built-in type and
  * function definition system. This approach is by no means ideal however; and
@@ -41,7 +41,7 @@
  */
 
 
-/* Number of tuple fields that master node functions return */
+/* Number of tuple fields that coordinator node functions return */
 #define TABLE_METADATA_FIELDS 7
 #define CANDIDATE_NODE_FIELDS 2
 #define WORKER_NODE_FIELDS 2
@@ -163,12 +163,13 @@ typedef struct DistributedPlanningContext
 
 
 /*
- * CitusCustomScanPath is injected into the planner during the master query planning phase
- * of the logical planner.
- * We call out to the standard planner to plan the master query part for the output of the
- * logical planner. This makes it easier to implement new sql features into the logical
- * planner by not having to manually implement the plan creation for the query on the
- * master.
+ * CitusCustomScanPath is injected into the planner during the combine query planning
+ * phase of the logical planner.
+ *
+ * We call out to the standard planner to plan the combine query part for the output of
+ * the logical planner. This makes it easier to implement new sql features into the
+ * logical planner by not having to manually implement the plan creation for the combine
+ * query on the coordinator.
  */
 typedef struct CitusCustomScanPath
 {
@@ -176,7 +177,7 @@ typedef struct CitusCustomScanPath
 
 /*
  * Custom scan node computed by the citus planner that will produce the tuples for the
- * path we are injecting during the planning of the master query
+ * path we are injecting during the planning of the combine query
  */
 CustomScan *remoteScan;
 } CitusCustomScanPath;
@@ -15,7 +15,7 @@
 
 /*
  * ExtendedOpNodeProperties is a helper structure that is used to
- * share the common information among the worker and master extended
+ * share the common information among the worker and coordinator extended
  * op nodes.
  *
  * It is designed to be a read-only singleton object per extended op node
@@ -1,8 +1,8 @@
 /*-------------------------------------------------------------------------
  *
  * metadata_utility.h
- *   Type and function declarations used for reading and modifying master
- *   node's metadata.
+ *   Type and function declarations used for reading and modifying
+ *   coordinator node's metadata.
  *
  * Copyright (c) Citus Data, Inc.
  *
@@ -156,7 +156,7 @@ typedef struct MultiCartesianProduct
 /*
  * MultiExtendedOp defines a set of extended operators that operate on columns
  * in relational algebra. This node allows us to distinguish between operations
- * in the master and worker nodes, and also captures the following:
+ * in the coordinator and worker nodes, and also captures the following:
  *
  * (1) Aggregate functions such as sums or averages;
  * (2) Grouping of attributes; these groupings may also be tied to aggregates;
@@ -58,7 +58,7 @@ typedef enum
 } TaskExecStatus;
 
 
-/* Enumeration to track file transmits to the master node */
+/* Enumeration to track file transmits to the coordinator node */
 typedef enum
 {
 	EXEC_TRANSMIT_INVALID_FIRST = 0,
@@ -4,9 +4,9 @@
  * definition of the "server" relation (pg_dist_placement).
  *
  * This table keeps information on remote shards and their whereabouts on the
- * master node. The table's contents are updated and used as follows: (i) the
+ * coordinator node. The table's contents are updated and used as follows: (i) the
  * worker nodes send periodic reports about the shards they contain, and (ii)
- * the master reconciles these shard reports, and determines outdated, under-
+ * the coordinator reconciles these shard reports, and determines outdated, under-
  * and over-replicated shards.
  *
  * Copyright (c) Citus Data, Inc.
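To make the reconciliation step the comment describes a little more concrete, here is a hypothetical standalone sketch that flags under- and over-replicated shards by comparing active placement counts against a desired replication factor; the struct and the numbers are illustrative only and do not reflect the actual pg_dist_placement schema:

```c
#include <stdio.h>

/*
 * Hypothetical, simplified view of shard placements: how many healthy copies
 * of each shard the coordinator currently knows about.
 */
typedef struct ShardInfo
{
	long shardId;
	int activePlacementCount;
} ShardInfo;

/* Report shards whose placement count diverges from the desired factor. */
static void
ClassifyShards(const ShardInfo *shards, int shardCount, int replicationFactor)
{
	for (int i = 0; i < shardCount; i++)
	{
		const ShardInfo *shard = &shards[i];
		if (shard->activePlacementCount < replicationFactor)
		{
			printf("shard %ld is under-replicated (%d < %d)\n",
			       shard->shardId, shard->activePlacementCount, replicationFactor);
		}
		else if (shard->activePlacementCount > replicationFactor)
		{
			printf("shard %ld is over-replicated (%d > %d)\n",
			       shard->shardId, shard->activePlacementCount, replicationFactor);
		}
	}
}

int
main(void)
{
	ShardInfo shards[] = { { 102008, 1 }, { 102009, 2 }, { 102010, 3 } };
	ClassifyShards(shards, 3, 2); /* desired replication factor of 2 */
	return 0;
}
```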