mirror of https://github.com/citusdata/citus.git
Merge branch 'main' into citus_pause_node
commit d9cecba67c
@@ -358,6 +358,11 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)
 	subquery->jointree = joinTree;
 
 	rte->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+	/* no permission checking for this RTE */
+	rte->perminfoindex = 0;
+#endif
 	rte->subquery = subquery;
 	rte->alias = copyObject(rte->eref);
 }
@@ -56,6 +56,9 @@
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
 #include "nodes/pg_list.h"
+#if PG_VERSION_NUM >= PG_VERSION_16
+#include "parser/parse_relation.h"
+#endif
 #include "parser/parsetree.h"
 #include "parser/parse_type.h"
 #include "optimizer/optimizer.h"
@@ -145,6 +148,8 @@ static void WarnIfListHasForeignDistributedTable(List *rangeTableList);
 static RouterPlanType GetRouterPlanType(Query *query,
                                         Query *originalQuery,
                                         bool hasUnresolvedParams);
+static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan,
+                                           PlannedStmt *concatPlan);
 
 
 /* Distributed planner hook */
@@ -1081,6 +1086,11 @@ CreateDistributedPlan(uint64 planId, bool allowRecursivePlanning, Query *origina
    /*
     * Plan subqueries and CTEs that cannot be pushed down by recursively
     * calling the planner and return the resulting plans to subPlanList.
+    * Note that GenerateSubplansForSubqueriesAndCTEs will reset perminfoindexes
+    * for some RTEs in originalQuery->rtable list, while not changing
+    * originalQuery->rteperminfos. That's fine because we will go through
+    * standard_planner again, which will adjust things accordingly in
+    * set_plan_references>add_rtes_to_flat_rtable>add_rte_to_flat_rtable.
     */
    List *subPlanList = GenerateSubplansForSubqueriesAndCTEs(planId, originalQuery,
                                                             plannerRestrictionContext);
@@ -1480,12 +1490,42 @@ FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan,
    finalPlan->utilityStmt = localPlan->utilityStmt;
 
    /* add original range table list for access permission checks */
-   finalPlan->rtable = list_concat(finalPlan->rtable, localPlan->rtable);
+   ConcatenateRTablesAndPerminfos(finalPlan, localPlan);
 
    return finalPlan;
 }
 
 
+static void
+ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan, PlannedStmt *concatPlan)
+{
+   mainPlan->rtable = list_concat(mainPlan->rtable, concatPlan->rtable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+   /*
+    * concatPlan's range table list is concatenated to mainPlan's range table list
+    * therefore all the perminfoindexes should be updated to their value
+    * PLUS the highest perminfoindex in mainPlan's perminfos, which is exactly
+    * the list length.
+    */
+   int mainPlan_highest_perminfoindex = list_length(mainPlan->permInfos);
+
+   ListCell *lc;
+   foreach(lc, concatPlan->rtable)
+   {
+       RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+       if (rte->perminfoindex != 0)
+       {
+           rte->perminfoindex = rte->perminfoindex + mainPlan_highest_perminfoindex;
+       }
+   }
+
+   /* finally, concatenate perminfos as well */
+   mainPlan->permInfos = list_concat(mainPlan->permInfos, concatPlan->permInfos);
+#endif
+}
+
+
 /*
  * FinalizeRouterPlan gets a CustomScan node which already wrapped distributed
  * part of a router plan and sets it as the direct child of the router plan
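For illustration only, not part of the commit: the index shift described in the new ConcatenateRTablesAndPerminfos comment boils down to simple arithmetic. The sketch below uses made-up stand-in structs instead of the real PostgreSQL planner types and hypothetical values; only the offset logic mirrors the hunk above.

#include <stdio.h>

/* stand-in for RangeTblEntry, keeping only the field this example needs */
typedef struct { int perminfoindex; } StubRte;

int main(void)
{
    /* the "main" plan already holds 2 permInfos, so appended entries shift by 2 */
    int mainPlanPerminfoCount = 2;

    /* perminfoindex 0 means "no permission info for this RTE" and must stay 0 */
    StubRte appendedRtes[] = { { 1 }, { 0 }, { 2 } };

    for (int i = 0; i < 3; i++)
    {
        if (appendedRtes[i].perminfoindex != 0)
        {
            /* same shift the helper applies: old index plus length of main list */
            appendedRtes[i].perminfoindex += mainPlanPerminfoCount;
        }
        printf("rte %d -> perminfoindex %d\n", i, appendedRtes[i].perminfoindex);
    }
    return 0;
}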
@@ -1517,7 +1557,7 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan)
    routerPlan->rtable = list_make1(remoteScanRangeTableEntry);
 
    /* add original range table list for access permission checks */
-   routerPlan->rtable = list_concat(routerPlan->rtable, localPlan->rtable);
+   ConcatenateRTablesAndPerminfos(routerPlan, localPlan);
 
    routerPlan->canSetTag = true;
    routerPlan->relationOids = NIL;
@@ -136,6 +136,9 @@ GeneratePlaceHolderPlannedStmt(Query *parse)
    result->stmt_len = parse->stmt_len;
 
    result->rtable = copyObject(parse->rtable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+   result->permInfos = copyObject(parse->rteperminfos);
+#endif
    result->planTree = (Plan *) plan;
    result->hasReturning = (parse->returningList != NIL);
 
@@ -604,6 +604,22 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan)
    combineQuery->querySource = QSRC_ORIGINAL;
    combineQuery->canSetTag = true;
    combineQuery->rtable = list_make1(rangeTableEntry);
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+   /*
+    * This part of the code is more of a sanity check for readability,
+    * it doesn't really do anything.
+    * We know that Only relation RTEs and subquery RTEs that were once relation
+    * RTEs (views) have their perminfoindex set. (see ExecCheckPermissions function)
+    * DerivedRangeTableEntry sets the rtekind to RTE_FUNCTION
+    * Hence we should have no perminfos here.
+    */
+   Assert(rangeTableEntry->rtekind == RTE_FUNCTION &&
+          rangeTableEntry->perminfoindex == 0);
+   combineQuery->rteperminfos = NIL;
+#endif
+
    combineQuery->targetList = targetList;
    combineQuery->jointree = joinTree;
    return combineQuery;
@@ -1533,6 +1549,20 @@ WrapSubquery(Query *subquery)
                                   selectAlias, false, true));
    outerQuery->rtable = list_make1(newRangeTableEntry);
 
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+   /*
+    * This part of the code is more of a sanity check for readability,
+    * it doesn't really do anything.
+    * addRangeTableEntryForSubquery doesn't add permission info
+    * because the range table is set to be RTE_SUBQUERY.
+    * Hence we should also have no perminfos here.
+    */
+   Assert(newRangeTableEntry->rtekind == RTE_SUBQUERY &&
+          newRangeTableEntry->perminfoindex == 0);
+   outerQuery->rteperminfos = NIL;
+#endif
+
    /* set the FROM expression to the subquery */
    RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
    newRangeTableRef->rtindex = 1;
@@ -107,6 +107,7 @@
 #include "optimizer/optimizer.h"
 #include "optimizer/planner.h"
 #include "optimizer/prep.h"
+#include "parser/parse_relation.h"
 #include "parser/parsetree.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
@@ -136,6 +137,9 @@ typedef struct RangeTableEntryDetails
    RangeTblEntry *rangeTableEntry;
    List *requiredAttributeNumbers;
    bool hasConstantFilterOnUniqueColumn;
+#if PG_VERSION_NUM >= PG_VERSION_16
+   RTEPermissionInfo *perminfo;
+#endif
 } RangeTableEntryDetails;
 
 /*
@@ -176,7 +180,8 @@ static bool HasConstantFilterOnUniqueColumn(RangeTblEntry *rangeTableEntry,
 static ConversionCandidates * CreateConversionCandidates(PlannerRestrictionContext *
                                                          plannerRestrictionContext,
                                                          List *rangeTableList,
-                                                         int resultRTEIdentity);
+                                                         int resultRTEIdentity,
+                                                         List *rteperminfos);
 static void AppendUniqueIndexColumnsToList(Form_pg_index indexForm, List **uniqueIndexes,
                                            int flags);
 static ConversionChoice GetConversionChoice(ConversionCandidates *
@@ -205,10 +210,17 @@ RecursivelyPlanLocalTableJoins(Query *query,
        GetPlannerRestrictionContext(context);
 
    List *rangeTableList = query->rtable;
+#if PG_VERSION_NUM >= PG_VERSION_16
+   List *rteperminfos = query->rteperminfos;
+#endif
    int resultRTEIdentity = ResultRTEIdentity(query);
    ConversionCandidates *conversionCandidates =
        CreateConversionCandidates(plannerRestrictionContext,
-                                  rangeTableList, resultRTEIdentity);
+#if PG_VERSION_NUM >= PG_VERSION_16
+                                  rangeTableList, resultRTEIdentity, rteperminfos);
+#else
+                                  rangeTableList, resultRTEIdentity, NIL);
+#endif
 
    ConversionChoice conversionChoise =
        GetConversionChoice(conversionCandidates, plannerRestrictionContext);
@@ -323,7 +335,12 @@ ConvertRTEsToSubquery(List *rangeTableEntryDetailsList, RecursivePlanningContext
        RangeTblEntry *rangeTableEntry = rangeTableEntryDetails->rangeTableEntry;
        List *requiredAttributeNumbers = rangeTableEntryDetails->requiredAttributeNumbers;
        ReplaceRTERelationWithRteSubquery(rangeTableEntry,
-                                         requiredAttributeNumbers, context);
+#if PG_VERSION_NUM >= PG_VERSION_16
+                                         requiredAttributeNumbers, context,
+                                         rangeTableEntryDetails->perminfo);
+#else
+                                         requiredAttributeNumbers, context, NULL);
+#endif
    }
 }
 
@@ -530,7 +547,9 @@ RequiredAttrNumbersForRelationInternal(Query *queryToProcess, int rteIndex)
  */
 static ConversionCandidates *
 CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext,
-                           List *rangeTableList, int resultRTEIdentity)
+                           List *rangeTableList,
+                           int resultRTEIdentity,
+                           List *rteperminfos)
 {
    ConversionCandidates *conversionCandidates =
        palloc0(sizeof(ConversionCandidates));
@@ -564,6 +583,14 @@ CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext,
            RequiredAttrNumbersForRelation(rangeTableEntry, plannerRestrictionContext);
        rangeTableEntryDetails->hasConstantFilterOnUniqueColumn =
            HasConstantFilterOnUniqueColumn(rangeTableEntry, relationRestriction);
+#if PG_VERSION_NUM >= PG_VERSION_16
+       rangeTableEntryDetails->perminfo = NULL;
+       if (rangeTableEntry->perminfoindex)
+       {
+           rangeTableEntryDetails->perminfo = getRTEPermissionInfo(rteperminfos,
+                                                                   rangeTableEntry);
+       }
+#endif
 
        bool referenceOrDistributedTable =
            IsCitusTableType(rangeTableEntry->relid, REFERENCE_TABLE) ||
@@ -15,6 +15,7 @@
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
 #include "optimizer/optimizer.h"
+#include "parser/parse_relation.h"
 #include "parser/parsetree.h"
 #include "tcop/tcopprot.h"
 #include "utils/lsyscache.h"
@@ -777,6 +778,11 @@ ConvertCteRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte)
    Query *cteQuery = (Query *) copyObject(sourceCte->ctequery);
 
    sourceRte->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+   /* sanity check - sourceRte was RTE_CTE previously so it should have no perminfo */
+   Assert(sourceRte->perminfoindex == 0);
+#endif
 
    /*
     * As we are delinking the CTE from main query, we have to walk through the
@@ -827,6 +833,20 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
    RangeTblEntry *newRangeTableEntry = copyObject(sourceRte);
    sourceResultsQuery->rtable = list_make1(newRangeTableEntry);
 
+#if PG_VERSION_NUM >= PG_VERSION_16
+   sourceResultsQuery->rteperminfos = NIL;
+   if (sourceRte->perminfoindex)
+   {
+       /* create permission info for newRangeTableEntry */
+       RTEPermissionInfo *perminfo = getRTEPermissionInfo(mergeQuery->rteperminfos,
+                                                          sourceRte);
+
+       /* update the sourceResultsQuery's rteperminfos accordingly */
+       newRangeTableEntry->perminfoindex = 1;
+       sourceResultsQuery->rteperminfos = list_make1(perminfo);
+   }
+#endif
+
    /* set the FROM expression to the subquery */
    newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
    sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
@@ -852,6 +872,9 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
 
    /* replace the function with the constructed subquery */
    sourceRte->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+   sourceRte->perminfoindex = 0;
+#endif
    sourceRte->subquery = sourceResultsQuery;
    sourceRte->inh = false;
 }
@@ -83,7 +83,16 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti
         * functions (i.e., FilterPlannerRestrictionForQuery()) rely on queries
         * not relations.
         */
-       anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL);
+#if PG_VERSION_NUM >= PG_VERSION_16
+       RTEPermissionInfo *perminfo = NULL;
+       if (anchorRangeTblEntry->perminfoindex)
+       {
+           perminfo = getRTEPermissionInfo(subquery->rteperminfos, anchorRangeTblEntry);
+       }
+       anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, perminfo);
+#else
+       anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, NULL);
+#endif
    }
    else if (anchorRangeTblEntry->rtekind == RTE_SUBQUERY)
    {
@@ -266,7 +275,9 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker)
  * designed for generating a stub query.
  */
 Query *
-WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes)
+WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
+                            List *requiredAttributes,
+                            RTEPermissionInfo *perminfo)
 {
    Query *subquery = makeNode(Query);
    RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
@@ -277,6 +288,14 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes
    RangeTblEntry *newRangeTableEntry = copyObject(rteRelation);
    subquery->rtable = list_make1(newRangeTableEntry);
 
+#if PG_VERSION_NUM >= PG_VERSION_16
+   if (perminfo)
+   {
+       newRangeTableEntry->perminfoindex = 1;
+       subquery->rteperminfos = list_make1(perminfo);
+   }
+#endif
+
    /* set the FROM expression to the subquery */
    newRangeTableRef = makeNode(RangeTblRef);
    newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
@@ -1915,6 +1915,9 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery)
    pushedDownQuery->targetList = subqueryTargetEntryList;
    pushedDownQuery->jointree = copyObject(queryTree->jointree);
    pushedDownQuery->rtable = copyObject(queryTree->rtable);
+#if PG_VERSION_NUM >= PG_VERSION_16
+   pushedDownQuery->rteperminfos = copyObject(queryTree->rteperminfos);
+#endif
    pushedDownQuery->setOperations = copyObject(queryTree->setOperations);
    pushedDownQuery->querySource = queryTree->querySource;
    pushedDownQuery->hasSubLinks = queryTree->hasSubLinks;
@@ -80,6 +80,7 @@
 #include "optimizer/optimizer.h"
 #include "optimizer/planner.h"
 #include "optimizer/prep.h"
+#include "parser/parse_relation.h"
 #include "parser/parsetree.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
@@ -886,8 +887,19 @@ RecursivelyPlanDistributedJoinNode(Node *node, Query *query,
        List *requiredAttributes =
            RequiredAttrNumbersForRelation(distributedRte, restrictionContext);
 
+#if PG_VERSION_NUM >= PG_VERSION_16
+       RTEPermissionInfo *perminfo = NULL;
+       if (distributedRte->perminfoindex)
+       {
+           perminfo = getRTEPermissionInfo(query->rteperminfos, distributedRte);
+       }
+
        ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
-                                         recursivePlanningContext);
+                                         recursivePlanningContext, perminfo);
+#else
+       ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
+                                         recursivePlanningContext, NULL);
+#endif
    }
    else if (distributedRte->rtekind == RTE_SUBQUERY)
    {
@@ -1751,9 +1763,11 @@ NodeContainsSubqueryReferencingOuterQuery(Node *node)
 void
 ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
                                   List *requiredAttrNumbers,
-                                  RecursivePlanningContext *context)
+                                  RecursivePlanningContext *context,
+                                  RTEPermissionInfo *perminfo)
 {
-   Query *subquery = WrapRteRelationIntoSubquery(rangeTableEntry, requiredAttrNumbers);
+   Query *subquery = WrapRteRelationIntoSubquery(rangeTableEntry, requiredAttrNumbers,
+                                                 perminfo);
    List *outerQueryTargetList = CreateAllTargetListForRelation(rangeTableEntry->relid,
                                                                requiredAttrNumbers);
 
@@ -1778,6 +1792,9 @@ ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
 
    /* replace the function with the constructed subquery */
    rangeTableEntry->rtekind = RTE_SUBQUERY;
+#if PG_VERSION_NUM >= PG_VERSION_16
+   rangeTableEntry->perminfoindex = 0;
+#endif
    rangeTableEntry->subquery = subquery;
 
    /*
@@ -1850,6 +1867,15 @@ CreateOuterSubquery(RangeTblEntry *rangeTableEntry, List *outerSubqueryTargetLis
    innerSubqueryRTE->eref->colnames = innerSubqueryColNames;
    outerSubquery->rtable = list_make1(innerSubqueryRTE);
 
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+   /* sanity check */
+   Assert(innerSubqueryRTE->rtekind == RTE_SUBQUERY &&
+          innerSubqueryRTE->perminfoindex == 0);
+   outerSubquery->rteperminfos = NIL;
+#endif
+
+
    /* set the FROM expression to the subquery */
    RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
    newRangeTableRef->rtindex = 1;
@@ -2022,6 +2048,15 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
 
    /* set the FROM expression to the subquery */
    subquery->rtable = list_make1(newRangeTableEntry);
+
+#if PG_VERSION_NUM >= PG_VERSION_16
+
+   /* sanity check */
+   Assert(newRangeTableEntry->rtekind == RTE_FUNCTION &&
+          newRangeTableEntry->perminfoindex == 0);
+   subquery->rteperminfos = NIL;
+#endif
+
    newRangeTableRef->rtindex = 1;
    subquery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
 
@@ -2392,6 +2427,9 @@ BuildReadIntermediateResultsQuery(List *targetEntryList, List *columnAliasList,
    Query *resultQuery = makeNode(Query);
    resultQuery->commandType = CMD_SELECT;
    resultQuery->rtable = list_make1(rangeTableEntry);
+#if PG_VERSION_NUM >= PG_VERSION_16
+   resultQuery->rteperminfos = NIL;
+#endif
    resultQuery->jointree = joinTree;
    resultQuery->targetList = targetList;
 
@@ -45,9 +45,6 @@ RelationGetNamespaceName(Relation relation)
  * we are dealing with GetUserId().
  * Currently the following entries are filled like this:
  * perminfo->checkAsUser = GetUserId();
- * perminfo->selectedCols = NULL;
- * perminfo->insertedCols = NULL;
- * perminfo->updatedCols = NULL;
  */
 RTEPermissionInfo *
 GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms)
@@ -57,9 +54,6 @@ GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms)
    perminfo->inh = inh;
    perminfo->requiredPerms = requiredPerms;
    perminfo->checkAsUser = GetUserId();
-   perminfo->selectedCols = NULL;
-   perminfo->insertedCols = NULL;
-   perminfo->updatedCols = NULL;
    return perminfo;
 }
 
@@ -35,7 +35,8 @@ extern ColocatedJoinChecker CreateColocatedJoinChecker(Query *subquery,
                                                        restrictionContext);
 extern bool SubqueryColocated(Query *subquery, ColocatedJoinChecker *context);
 extern Query * WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
-                                           List *requiredAttributes);
+                                           List *requiredAttributes,
+                                           RTEPermissionInfo *perminfo);
 extern List * CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes);
 
 #endif /* QUERY_COLOCATION_CHECKER_H */
@@ -42,7 +42,8 @@ extern bool GeneratingSubplans(void);
 extern bool ContainsLocalTableDistributedTableJoin(List *rangeTableList);
 extern void ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
                                               List *requiredAttrNumbers,
-                                              RecursivePlanningContext *context);
+                                              RecursivePlanningContext *context,
+                                              RTEPermissionInfo *perminfo);
 extern bool IsRecursivelyPlannableRelation(RangeTblEntry *rangeTableEntry);
 extern bool IsRelationLocalTableOrMatView(Oid relationId);
 extern bool ContainsReferencesToOuterQuery(Query *query);
@@ -144,6 +144,13 @@ object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode)
 
 typedef bool TU_UpdateIndexes;
 
+/*
+ * we define RTEPermissionInfo for PG16 compatibility
+ * There are some functions that need to include RTEPermissionInfo in their signature
+ * for PG14/PG15 we pass a NULL argument in these functions
+ */
+typedef RangeTblEntry RTEPermissionInfo;
+
 #endif
 
 #if PG_VERSION_NUM >= PG_VERSION_15
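A condensed sketch of the caller pattern this compatibility typedef enables, assembled from the hunks elsewhere in this diff rather than taken verbatim from the commit; the variable names here (rte, query, requiredAttrNumbers, context) are generic placeholders.

#if PG_VERSION_NUM >= PG_VERSION_16
    /* on PG16+ the real RTEPermissionInfo is looked up and threaded through */
    RTEPermissionInfo *perminfo = NULL;
    if (rte->perminfoindex)
    {
        perminfo = getRTEPermissionInfo(query->rteperminfos, rte);
    }
    ReplaceRTERelationWithRteSubquery(rte, requiredAttrNumbers, context, perminfo);
#else
    /* pre-PG16, RTEPermissionInfo exists only as the typedef above and callers pass NULL */
    ReplaceRTERelationWithRteSubquery(rte, requiredAttrNumbers, context, NULL);
#endif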
@@ -301,5 +301,6 @@ s/(NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA p
 # (This is not preprocessor directive, but a reminder for the developer that will drop PG14&15 support )
 
 s/, password_required=false//g
+s/provide the file or change sslmode/provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode/g
 
 #endif /* PG_VERSION_NUM < PG_VERSION_16 */
@@ -1,6 +1,10 @@
 --
 -- Test chunk filtering in columnar using min/max values in stripe skip lists.
 --
+-- It has an alternative test output file
+-- because PG16 changed the order of some Filters in EXPLAIN
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
 --
 -- filtered_row_count returns number of rows filtered by the WHERE clause.
 -- If chunks get filtered by columnar, less rows are passed to WHERE
@@ -370,10 +374,10 @@ SELECT * FROM r1, coltest WHERE
 Filter: ((n1 % 10) = 0)
 Rows Removed by Filter: 1
 -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=4)
-Filter: ((x1 > 15000) AND (r1.id1 = id) AND ((x1)::text > '000000'::text))
+Filter: ((x1 > 15000) AND (id = r1.id1) AND ((x1)::text > '000000'::text))
 Rows Removed by Filter: 999
 Columnar Projected Columns: id, x1, x2, x3
-Columnar Chunk Group Filters: ((x1 > 15000) AND (r1.id1 = id))
+Columnar Chunk Group Filters: ((x1 > 15000) AND (id = r1.id1))
 Columnar Chunk Groups Removed by Filter: 19
 (10 rows)
 
@@ -413,10 +417,10 @@ SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
 -> Seq Scan on r2 (actual rows=5 loops=5)
 -> Seq Scan on r3 (actual rows=5 loops=5)
 -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
-Filter: (r1.id1 = id)
+Filter: (id = r1.id1)
 Rows Removed by Filter: 999
 Columnar Projected Columns: id, x1, x2, x3
-Columnar Chunk Group Filters: (r1.id1 = id)
+Columnar Chunk Group Filters: (id = r1.id1)
 Columnar Chunk Groups Removed by Filter: 19
 -> Seq Scan on r4 (actual rows=1 loops=5)
 -> Seq Scan on r5 (actual rows=1 loops=1)
@@ -588,10 +592,10 @@ DETAIL: parameterized by rels {r3}; 2 clauses pushed down
 -> Nested Loop (actual rows=3 loops=1)
 -> Seq Scan on r1 (actual rows=5 loops=1)
 -> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
-Filter: ((r1.n1 > x1) AND (r1.id1 = id))
+Filter: ((r1.n1 > x1) AND (id = r1.id1))
 Rows Removed by Filter: 799
 Columnar Projected Columns: id, x1, x2, x3
-Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
+Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1))
 Columnar Chunk Groups Removed by Filter: 19
 -> Seq Scan on r2 (actual rows=5 loops=3)
 -> Seq Scan on r3 (actual rows=5 loops=3)
@@ -618,10 +622,10 @@ SELECT * FROM r1, coltest_part WHERE
 -> Seq Scan on r1 (actual rows=5 loops=1)
 -> Append (actual rows=1 loops=5)
 -> Custom Scan (ColumnarScan) on coltest_part0 coltest_part_1 (actual rows=1 loops=3)
-Filter: ((r1.n1 > x1) AND (r1.id1 = id))
+Filter: ((r1.n1 > x1) AND (id = r1.id1))
 Rows Removed by Filter: 999
 Columnar Projected Columns: id, x1, x2, x3
-Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
+Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1))
 Columnar Chunk Groups Removed by Filter: 9
 -> Seq Scan on coltest_part1 coltest_part_2 (actual rows=0 loops=2)
 Filter: ((r1.n1 > x1) AND (r1.id1 = id))
File diff suppressed because it is too large
@@ -77,10 +77,10 @@ FROM columnar_test_helpers.columnar_store_memory_stats();
 top_growth | 1
 
 -- before this change, max mem usage while executing inserts was 28MB and
--- with this change it's less than 8MB.
+-- with this change it's less than 9MB.
 SELECT
-(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
-(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
+(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
+(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
 -[ RECORD 1 ]--+--
 large_batch_ok | t
 first_batch_ok | t
@@ -244,13 +244,13 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 1
 (1 row)
 
-SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
+SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
 role | member | grantor | admin_option
 ---------------------------------------------------------------------
-dist_role_1 | dist_role_2 | non_dist_role_1 | f
-dist_role_3 | non_dist_role_3 | postgres | f
-non_dist_role_1 | non_dist_role_2 | dist_role_1 | f
-non_dist_role_4 | dist_role_4 | postgres | f
+dist_role_1 | dist_role_2 | t | f
+dist_role_3 | non_dist_role_3 | t | f
+non_dist_role_1 | non_dist_role_2 | t | f
+non_dist_role_4 | dist_role_4 | t | f
 (4 rows)
 
 SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
@@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1247,7 +1247,7 @@ DO UPDATE SET
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
+Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
 -> Seq Scan on source_table_4213644 source_table
 (10 rows)
 
@@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -1247,7 +1247,7 @@ DO UPDATE SET
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> HashAggregate
-Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
+Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
 -> Seq Scan on source_table_4213644 source_table
 (10 rows)
 
@@ -1232,31 +1232,20 @@ WHERE o_orderkey IN (1, 2)
 -> Seq Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
 (13 rows)
 
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS OFF)
 SELECT count(*)
 FROM orders_hash_partitioned
 FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
 WHERE o_orderkey IN (1, 2)
 AND l_orderkey IN (2, 3);
-QUERY PLAN
+$Q$);
+ coordinator_plan
 ---------------------------------------------------------------------
 Aggregate
 -> Custom Scan (Citus Adaptive)
 Task Count: 3
-Tasks Shown: One of 3
--> Task
-Node: host=localhost port=xxxxx dbname=regression
--> Aggregate
--> Nested Loop
-Join Filter: (orders_hash_partitioned.o_orderkey = lineitem_hash_partitioned.l_orderkey)
--> Seq Scan on orders_hash_partitioned_630000 orders_hash_partitioned
-Filter: (o_orderkey = ANY ('{1,2}'::integer[]))
--> Materialize
--> Bitmap Heap Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
-Recheck Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
--> Bitmap Index Scan on lineitem_hash_partitioned_pkey_630004
-Index Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
-(16 rows)
+(3 rows)
 
 SET citus.task_executor_type TO DEFAULT;
 DROP TABLE lineitem_hash_partitioned;
@@ -120,7 +120,7 @@ EXPLAIN (COSTS FALSE)
 SELECT sum(l_extendedprice * l_discount) as revenue
 FROM lineitem_hash, orders_hash
 WHERE o_orderkey = l_orderkey
-GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
+GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
 ORDER BY 1 DESC LIMIT 3;
 QUERY PLAN
 ---------------------------------------------------------------------
@@ -136,7 +136,7 @@ EXPLAIN (COSTS FALSE)
 -> Sort
 Sort Key: (sum((lineitem_hash.l_extendedprice * lineitem_hash.l_discount))) DESC
 -> HashAggregate
-Group Key: lineitem_hash.l_orderkey, orders_hash.o_orderkey, lineitem_hash.l_shipmode
+Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_shipmode
 Filter: (sum(lineitem_hash.l_quantity) > '24'::numeric)
 -> Hash Join
 Hash Cond: (orders_hash.o_orderkey = lineitem_hash.l_orderkey)
@@ -148,7 +148,7 @@ SELECT pg_reload_conf();
 CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637'
 PUBLICATION pub_01 WITH (citus_use_authinfo=true);
 ERROR: could not connect to the publisher: root certificate file "/non/existing/certificate.crt" does not exist
-Either provide the file or change sslmode to disable server certificate verification.
+Either provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode to disable server certificate verification.
 ALTER SYSTEM RESET citus.node_conninfo;
 SELECT pg_reload_conf();
 pg_reload_conf
@@ -425,9 +425,25 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 test_table_2_1130000
 (4 rows)
 
+-- PG16 added one more backend type B_STANDALONE_BACKEND
+-- and also alphabetized the backend types, hence the orders changed
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+\if :server_version_ge_16
+SELECT 4 AS client_backend \gset
+SELECT 5 AS bgworker \gset
+SELECT 12 AS walsender \gset
+\else
+SELECT 3 AS client_backend \gset
+SELECT 4 AS bgworker \gset
+SELECT 9 AS walsender \gset
+\endif
 -- say, we set it to bgworker
 -- the shards and indexes do not show up
-SELECT set_backend_type(4);
+SELECT set_backend_type(:bgworker);
 NOTICE: backend type switched to: background worker
 set_backend_type
 ---------------------------------------------------------------------
@@ -445,7 +461,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 
 -- or, we set it to walsender
 -- the shards and indexes do not show up
-SELECT set_backend_type(9);
+SELECT set_backend_type(:walsender);
 NOTICE: backend type switched to: walsender
 set_backend_type
 ---------------------------------------------------------------------
@@ -480,7 +496,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 
 RESET application_name;
 -- but, client backends to see the shards
-SELECT set_backend_type(3);
+SELECT set_backend_type(:client_backend);
 NOTICE: backend type switched to: client backend
 set_backend_type
 ---------------------------------------------------------------------
@@ -1062,7 +1062,7 @@ SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value)
 (26 rows)
 
 EXPLAIN (COSTS OFF)
-SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
+SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);
 QUERY PLAN
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
@@ -1070,20 +1070,18 @@ SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value)
 Tasks Shown: All
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
--> GroupAggregate
-Group Key: k1.key
+-> Aggregate
 Filter: (sum(k1.value) > $0)
 InitPlan 1 (returns $0)
 -> Limit
 -> Sort
 Sort Key: (sum(k2.value)) DESC
--> GroupAggregate
-Group Key: k2.key
+-> Aggregate
 -> Seq Scan on keyval2_xxxxxxx k2
 Filter: (key = 2)
 -> Seq Scan on keyval1_xxxxxxx k1
 Filter: (key = 2)
-(18 rows)
+(16 rows)
 
 -- Simple join subquery pushdown
 SELECT
@@ -370,11 +370,13 @@ SELECT DISTINCT y FROM test;
 (1 row)
 
 -- non deterministic collations
+SET client_min_messages TO WARNING;
 CREATE COLLATION test_pg12.case_insensitive (
 provider = icu,
 locale = '@colStrength=secondary',
 deterministic = false
 );
+RESET client_min_messages;
 CREATE TABLE col_test (
 id int,
 val text collate case_insensitive
@@ -400,22 +400,6 @@ NOTICE: renaming the new table to undistribute_table.dist_type_table
 
 (1 row)
 
--- test CREATE RULE with ON SELECT
-CREATE TABLE rule_table_1 (a INT);
-CREATE TABLE rule_table_2 (a INT);
-SELECT create_distributed_table('rule_table_2', 'a');
-create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
--- the CREATE RULE turns rule_table_1 into a view
-ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
-NOTICE: Citus does not propagate adding/dropping member objects
-HINT: You can add/drop the member objects on the workers as well.
-SELECT undistribute_table('rule_table_2');
-ERROR: cannot alter table because an extension depends on it
 -- test CREATE RULE without ON SELECT
 CREATE TABLE rule_table_3 (a INT);
 CREATE TABLE rule_table_4 (a INT);
@@ -444,9 +428,6 @@ NOTICE: renaming the new table to undistribute_table.rule_table_4
 ALTER EXTENSION plpgsql DROP VIEW extension_view;
 NOTICE: Citus does not propagate adding/dropping member objects
 HINT: You can add/drop the member objects on the workers as well.
-ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
-NOTICE: Citus does not propagate adding/dropping member objects
-HINT: You can add/drop the member objects on the workers as well.
 ALTER EXTENSION plpgsql DROP TABLE rule_table_3;
 NOTICE: Citus does not propagate adding/dropping member objects
 HINT: You can add/drop the member objects on the workers as well.
@@ -456,11 +437,9 @@ DETAIL: drop cascades to view undis_view1
 drop cascades to view undis_view2
 drop cascades to view another_schema.undis_view3
 DROP SCHEMA undistribute_table, another_schema CASCADE;
-NOTICE: drop cascades to 7 other objects
+NOTICE: drop cascades to 5 other objects
 DETAIL: drop cascades to table extension_table
 drop cascades to view extension_view
 drop cascades to table dist_type_table
-drop cascades to table rule_table_2
-drop cascades to view rule_table_1
 drop cascades to table rule_table_3
 drop cascades to table rule_table_4
@@ -1,6 +1,10 @@
 --
 -- Test chunk filtering in columnar using min/max values in stripe skip lists.
 --
+-- It has an alternative test output file
+-- because PG16 changed the order of some Filters in EXPLAIN
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
 
 
 --
@@ -77,10 +77,10 @@ SELECT CASE WHEN 1.0 * TopMemoryContext / :top_post BETWEEN 0.98 AND 1.03 THEN 1
 FROM columnar_test_helpers.columnar_store_memory_stats();
 
 -- before this change, max mem usage while executing inserts was 28MB and
--- with this change it's less than 8MB.
+-- with this change it's less than 9MB.
 SELECT
-(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
-(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
+(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
+(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
 
 \x
 
@@ -117,7 +117,7 @@ GRANT non_dist_role_4 TO dist_role_4;
 
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 
-SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
+SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
 SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
 
 \c - - - :worker_1_port
@@ -611,7 +611,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -625,7 +625,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
 sum(cardinality),
 sum(sum)
 FROM source_table
-GROUP BY c1, c2, c3, c4, c5, c6
+GROUP BY c1, c2, c3, c4, c6
 ON CONFLICT(c1, c2, c3, c4, c5, c6)
 DO UPDATE SET
 cardinality = enriched.cardinality + excluded.cardinality,
@@ -336,12 +336,14 @@ FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
 WHERE o_orderkey IN (1, 2)
 OR l_orderkey IN (2, 3);

+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS OFF)
 SELECT count(*)
 FROM orders_hash_partitioned
 FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
 WHERE o_orderkey IN (1, 2)
 AND l_orderkey IN (2, 3);
+$Q$);

 SET citus.task_executor_type TO DEFAULT;

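The hunk above wraps the EXPLAIN statement in the coordinator_plan() helper defined elsewhere in the Citus regression suite, presumably so that worker-level plan details stay out of the expected output and the test remains stable across PostgreSQL versions. The $Q$ ... $Q$ markers are ordinary PostgreSQL dollar quoting, which lets the whole EXPLAIN text be passed as a single string argument with no quote escaping. A minimal sketch of dollar quoting on its own, independent of the helper (the table name t1 and the literal are made up for illustration):

-- Both statements select the same string; the dollar-quoted one needs no escaping.
SELECT 'EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE a = ''x''';
SELECT $Q$EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE a = 'x'$Q$;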
@@ -43,7 +43,7 @@ EXPLAIN (COSTS FALSE)
 SELECT sum(l_extendedprice * l_discount) as revenue
 FROM lineitem_hash, orders_hash
 WHERE o_orderkey = l_orderkey
-GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
+GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
 ORDER BY 1 DESC LIMIT 3;

 EXPLAIN (COSTS FALSE)
@@ -226,14 +226,32 @@ RESET citus.enable_metadata_sync;
 -- the shards and indexes do not show up
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;

+-- PG16 added one more backend type B_STANDALONE_BACKEND
+-- and also alphabetized the backend types, hence the orders changed
+-- Relevant PG commit:
+-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
+SHOW server_version \gset
+SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
+\gset
+
+\if :server_version_ge_16
+SELECT 4 AS client_backend \gset
+SELECT 5 AS bgworker \gset
+SELECT 12 AS walsender \gset
+\else
+SELECT 3 AS client_backend \gset
+SELECT 4 AS bgworker \gset
+SELECT 9 AS walsender \gset
+\endif
+
 -- say, we set it to bgworker
 -- the shards and indexes do not show up
-SELECT set_backend_type(4);
+SELECT set_backend_type(:bgworker);
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;

 -- or, we set it to walsender
 -- the shards and indexes do not show up
-SELECT set_backend_type(9);
+SELECT set_backend_type(:walsender);
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;

 -- unless the application name starts with citus_shard
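For readers less familiar with psql scripting: \gset stores the columns of the most recent query result into psql variables, and \if / \else / \endif (available since psql 10) branch on a boolean variable. That is how the hunk above selects the correct numeric backend-type values per server version instead of hard-coding 3, 4, and 9. A minimal, self-contained sketch of the same gating pattern, outside the test (the \echo lines are illustrative only and not part of the test file):

SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 \gset
\if :server_version_ge_16
\echo running against PostgreSQL 16 or newer
\else
\echo running against PostgreSQL 15 or older
\endif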
@@ -242,7 +260,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
 RESET application_name;

 -- but, client backends to see the shards
-SELECT set_backend_type(3);
+SELECT set_backend_type(:client_backend);
 SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;


@@ -676,7 +676,7 @@ EXPLAIN (COSTS OFF)
 SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 GROUP BY key ORDER BY 1 DESC LIMIT 1);

 EXPLAIN (COSTS OFF)
-SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
+SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);

 -- Simple join subquery pushdown
 SELECT
@@ -242,11 +242,13 @@ COMMIT;
 SELECT DISTINCT y FROM test;

 -- non deterministic collations
+SET client_min_messages TO WARNING;
 CREATE COLLATION test_pg12.case_insensitive (
 provider = icu,
 locale = '@colStrength=secondary',
 deterministic = false
 );
+RESET client_min_messages;

 CREATE TABLE col_test (
 id int,
@@ -131,18 +131,6 @@ SELECT create_distributed_table('dist_type_table', 'a');

 SELECT undistribute_table('dist_type_table');

--- test CREATE RULE with ON SELECT
-CREATE TABLE rule_table_1 (a INT);
-CREATE TABLE rule_table_2 (a INT);
-SELECT create_distributed_table('rule_table_2', 'a');
-
-CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
-
--- the CREATE RULE turns rule_table_1 into a view
-ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
-
-SELECT undistribute_table('rule_table_2');
-
 -- test CREATE RULE without ON SELECT
 CREATE TABLE rule_table_3 (a INT);
 CREATE TABLE rule_table_4 (a INT);
@@ -155,7 +143,6 @@ ALTER EXTENSION plpgsql ADD TABLE rule_table_3;
 SELECT undistribute_table('rule_table_4');

 ALTER EXTENSION plpgsql DROP VIEW extension_view;
-ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
 ALTER EXTENSION plpgsql DROP TABLE rule_table_3;

 DROP TABLE view_table CASCADE;