mirror of https://github.com/citusdata/citus.git
Fix typos, rename isDistributedRelation to isCitusRelation
parent 00a7bc3044
commit b514ab0f55
@@ -140,7 +140,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand)
 Relation relation = heap_openrv(createIndexStatement->relation, lockmode);
 Oid relationId = RelationGetRelid(relation);

-bool isDistributedRelation = IsCitusTable(relationId);
+bool isCitusRelation = IsCitusTable(relationId);

 if (createIndexStatement->relation->schemaname == NULL)
 {
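Most hunks in this commit follow the same gate: look up the relation, ask IsCitusTable(), and only build DDL jobs for propagation when it returns true; the rename only touches the local flag's name, IsCitusTable() itself is unchanged. Below is a minimal sketch of that pattern, with a hypothetical function name and simplified return shape (not the exact Citus hook API), and the header path for IsCitusTable() assumed to be metadata_cache.h.

/*
 * Minimal sketch (not the actual Citus hook API): gate DDL propagation on
 * IsCitusTable(). The function name and return shape are hypothetical.
 */
#include "postgres.h"

#include "nodes/pg_list.h"
#include "distributed/metadata_cache.h"	/* assumed location of IsCitusTable() */

static List *
PreprocessSomeUtilityStmt(Oid relationId)
{
	bool isCitusRelation = IsCitusTable(relationId);
	if (!isCitusRelation)
	{
		/* plain PostgreSQL table: nothing to propagate, let ProcessUtility handle it */
		return NIL;
	}

	/* Citus table: build DDL job(s) to send the command to the worker shards */
	List *ddlJobs = NIL;
	/* ... e.g. palloc0() a DDLJob and set targetRelationId, as in the hunks above ... */
	return ddlJobs;
}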
@@ -161,7 +161,7 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand)

 heap_close(relation, NoLock);

-if (isDistributedRelation)
+if (isCitusRelation)
 {
 char *indexName = createIndexStatement->idxname;
 char *namespaceName = createIndexStatement->relation->schemaname;
@@ -212,7 +212,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand)
 {
 Relation relation = NULL;
 Oid relationId = InvalidOid;
-bool isDistributedRelation = false;
+bool isCitusRelation = false;
 #if PG_VERSION_NUM >= 120000
 LOCKMODE lockmode = reindexStatement->concurrent ? ShareUpdateExclusiveLock :
 AccessExclusiveLock;
@@ -249,7 +249,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand)
 relationId = RelationGetRelid(relation);
 }

-isDistributedRelation = IsCitusTable(relationId);
+isCitusRelation = IsCitusTable(relationId);

 if (reindexStatement->relation->schemaname == NULL)
 {
@@ -277,7 +277,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand)
 heap_close(relation, NoLock);
 }

-if (isDistributedRelation)
+if (isCitusRelation)
 {
 DDLJob *ddlJob = palloc0(sizeof(DDLJob));
 ddlJob->targetRelationId = relationId;
@@ -359,8 +359,8 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand)
 }

 Oid relationId = IndexGetRelation(indexId, false);
-bool isDistributedRelation = IsCitusTable(relationId);
-if (isDistributedRelation)
+bool isCitusRelation = IsCitusTable(relationId);
+if (isCitusRelation)
 {
 distributedIndexId = indexId;
 distributedRelationId = relationId;
@@ -2587,7 +2587,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS
 isFrom ? RowExclusiveLock :
 AccessShareLock);

-bool isDistributedRelation = IsCitusTable(RelationGetRelid(copiedRelation));
+bool isCitusRelation = IsCitusTable(RelationGetRelid(copiedRelation));

 /* ensure future lookups hit the same relation */
 char *schemaName = get_namespace_name(RelationGetNamespace(copiedRelation));
@@ -2600,7 +2600,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS

 heap_close(copiedRelation, NoLock);

-if (isDistributedRelation)
+if (isCitusRelation)
 {
 if (copyStatement->is_from)
 {
@@ -2710,7 +2710,7 @@ ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, const char *queryS


 /*
- * CitusCopyTo runs a COPY .. TO STDOUT command on each shard to to a full
+ * CitusCopyTo runs a COPY .. TO STDOUT command on each shard to do a full
  * table dump.
  */
 static void
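The comment fixed in this hunk describes CitusCopyTo streaming each shard's contents with COPY .. TO STDOUT. The following is a hedged sketch of that idea using plain libpq; connection setup, shard naming, and error handling are simplified, and CopyShardToStdout is a hypothetical helper, not the Citus function.

/*
 * Sketch: run "COPY <shard> TO STDOUT" on a worker connection and forward
 * the raw rows. Not the actual CitusCopyTo implementation.
 */
#include <stdio.h>
#include <libpq-fe.h>

static void
CopyShardToStdout(PGconn *workerConn, const char *shardName)
{
	char command[1024];
	snprintf(command, sizeof(command), "COPY %s TO STDOUT", shardName);

	PGresult *result = PQexec(workerConn, command);
	if (PQresultStatus(result) != PGRES_COPY_OUT)
	{
		fprintf(stderr, "COPY failed: %s", PQerrorMessage(workerConn));
		PQclear(result);
		return;
	}
	PQclear(result);

	/* PQgetCopyData returns the row length, -1 at end of data, -2 on error */
	char *row = NULL;
	int rowLength;
	while ((rowLength = PQgetCopyData(workerConn, &row, 0)) > 0)
	{
		fwrite(row, 1, rowLength, stdout);	/* forward the raw COPY row */
		PQfreemem(row);
	}

	/* drain the final command result after the COPY OUT stream ends */
	PGresult *finalResult = PQgetResult(workerConn);
	PQclear(finalResult);
}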
@@ -96,16 +96,15 @@ PreprocessRenameStmt(Node *node, const char *renameCommand)
 return NIL;
 }

-bool isDistributedRelation = IsCitusTable(tableRelationId);
-if (!isDistributedRelation)
+bool isCitusRelation = IsCitusTable(tableRelationId);
+if (!isCitusRelation)
 {
 return NIL;
 }

 /*
- * We might ERROR out on some commands, but only for Citus tables where
- * isDistributedRelation is true. That's why this test comes this late in
- * the function.
+ * We might ERROR out on some commands, but only for Citus tables.
+ * That's why this test comes this late in the function.
  */
 ErrorIfUnsupportedRenameStmt(renameStmt);

@@ -459,7 +459,7 @@ PreprocessAlterTableStmt(Node *node, const char *alterTableCommand)
 SetLocalMultiShardModifyModeToSequential();
 }

-/* fill them here as it is possible to use them in some condtional blocks below */
+/* fill them here as it is possible to use them in some conditional blocks below */
 DDLJob *ddlJob = palloc0(sizeof(DDLJob));
 ddlJob->targetRelationId = leftRelationId;
 ddlJob->concurrentIndexCmd = false;
@@ -567,8 +567,8 @@ WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement,
 return (Node *) alterTableStatement;
 }

-bool isDistributedRelation = IsCitusTable(leftRelationId);
-if (!isDistributedRelation)
+bool isCitusRelation = IsCitusTable(leftRelationId);
+if (!isCitusRelation)
 {
 return (Node *) alterTableStatement;
 }
@@ -654,8 +654,8 @@ ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement)
 return;
 }

-bool isDistributedRelation = IsCitusTable(leftRelationId);
-if (!isDistributedRelation)
+bool isCitusRelation = IsCitusTable(leftRelationId);
+if (!isCitusRelation)
 {
 return;
 }
@@ -472,7 +472,7 @@ CacheLocalPlanForShardQuery(Task *task, DistributedPlan *originalDistributedPlan

 /*
  * We prefer to use jobQuery (over task->query) because we don't want any
- * functions/params have been evaluated in the cached plan.
+ * functions/params to have been evaluated in the cached plan.
  */
 Query *shardQuery = copyObject(originalDistributedPlan->workerJob->jobQuery);

@@ -237,7 +237,7 @@ RepairShardPlacement(int64 shardId, const char *sourceNodeName, int32 sourceNode
 /*
  * Let's not allow repairing partitions to prevent any edge cases.
  * We're already not allowing any kind of modifications on the partitions
- * so their placements are not likely to to be marked as INVALID. The only
+ * so their placements are not likely to be marked as INVALID. The only
  * possible case to mark placement of a partition as invalid is
  * "ALTER TABLE parent_table DETACH PARTITION partition_table". But,
  * given that the table would become a regular distributed table if the
@@ -1559,7 +1559,7 @@ BuildSubqueryJobQuery(MultiNode *multiNode)
 hasAggregates = true;
 }

-/* distinct is not send to worker query if there are top level aggregates */
+/* distinct is not sent to worker query if there are top level aggregates */
 if (hasAggregates)
 {
 hasDistinctOn = false;
@@ -336,7 +336,7 @@ IsFunctionRTE(Node *node)
 * IsNodeSubquery returns true if the given node is a Query or SubPlan or a
 * Param node with paramkind PARAM_EXEC.
 *
- * The check for SubPlan is needed whev this is used on a already rewritten
+ * The check for SubPlan is needed when this is used on a already rewritten
 * query. Such a query has SubPlan nodes instead of SubLink nodes (which
 * contain a Query node).
 * The check for PARAM_EXEC is needed because some very simple subqueries like
@@ -544,7 +544,7 @@ SubqueryMultiNodeTree(Query *originalQuery, Query *queryTree,
 RaiseDeferredErrorInternal(repartitionQueryError, ERROR);
 }

-/* all checks has passed, safe to create the multi plan */
+/* all checks have passed, safe to create the multi plan */
 multiQueryNode = MultiNodeTree(queryTree);
 }

@@ -1502,7 +1502,7 @@ HasRecurringTuples(Node *node, RecurringTuplesType *recurType)
 * from other parts of code although it causes some code duplication.
 *
 * Current subquery pushdown support in MultiTree logic requires a single range
- * table entry in the top most from clause. Therefore we inject an synthetic
+ * table entry in the top most from clause. Therefore we inject a synthetic
 * query derived from the top level query and make it the only range table
 * entry for the top level query. This way we can push down any subquery joins
 * down to workers without invoking join order planner.
@@ -442,7 +442,7 @@ RecursivelyPlanNonColocatedSubqueries(Query *subquery, RecursivePlanningContext
 * RecursivelyPlanNonColocatedJoinWalker gets a join node and walks over it to find
 * subqueries that live under the node.
 *
- * When a subquery found, its checked whether the subquery is colocated with the
+ * When a subquery found, it's checked whether the subquery is colocated with the
 * anchor subquery specified in the nonColocatedJoinContext. If not,
 * the subquery is recursively planned.
 */
@@ -518,7 +518,7 @@ RecursivelyPlanNonColocatedJoinWalker(Node *joinNode,
 * RecursivelyPlanNonColocatedJoinWalker gets a query and walks over its sublinks
 * to find subqueries that live in WHERE clause.
 *
- * When a subquery found, its checked whether the subquery is colocated with the
+ * When a subquery found, it's checked whether the subquery is colocated with the
 * anchor subquery specified in the nonColocatedJoinContext. If not,
 * the subquery is recursively planned.
 */
@@ -82,7 +82,7 @@ typedef struct
 /*
  * The following two lists consists of relationIds that this distributed
  * relation has a foreign key to (e.g., referencedRelationsViaForeignKey) or
- * other relations has a foreign key to to this relation (e.g.,
+ * other relations has a foreign key to this relation (e.g.,
  * referencingRelationsViaForeignKey).
  *
  * Note that we're keeping all transitive foreign key references as well
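The struct comment fixed above describes two lists kept per distributed relation: the relations it references via foreign keys and the relations that reference it, including transitive references. A hedged sketch of that shape follows; the struct name and surrounding layout are illustrative, only the two list field names come from the comment itself.

/*
 * Sketch of a cache entry tracking foreign key relationships in both
 * directions for one distributed relation. Not the exact Citus definition.
 */
#include "postgres.h"

#include "nodes/pg_list.h"

typedef struct ForeignKeyRelationGraphEntry
{
	Oid relationId;

	/* relationIds this relation has a foreign key to (including transitive ones) */
	List *referencedRelationsViaForeignKey;

	/* relationIds that have a foreign key to this relation (including transitive ones) */
	List *referencingRelationsViaForeignKey;
} ForeignKeyRelationGraphEntry;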