Merge remote-tracking branch 'origin/main' into circleci-gha-migration

pull/7154/head
Gokhan Gulbiz 2023-08-17 12:51:03 +03:00
commit e5b55814b3
No known key found for this signature in database
GPG Key ID: 608EF06B6BD1B45B
102 changed files with 6681 additions and 654 deletions

View File

@ -6,16 +6,16 @@ orbs:
parameters:
image_suffix:
type: string
default: '-vbab548a'
default: '-v1b94240'
pg14_version:
type: string
default: '14.8'
default: '14.9'
pg15_version:
type: string
default: '15.3'
default: '15.4'
upgrade_pg_versions:
type: string
default: '14.8-15.3'
default: '14.9-15.4'
style_checker_tools_version:
type: string
default: '0.8.18'

View File

@ -819,12 +819,15 @@ GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options)
grantRoleStmt->grantee_roles = list_make1(roleSpec);
}
#if PG_VERSION_NUM < PG_VERSION_16
if (strcmp(option->defname, "adminmembers") == 0)
{
#if PG_VERSION_NUM >= PG_VERSION_16
DefElem *opt = makeDefElem("admin", (Node *) makeBoolean(true), -1);
grantRoleStmt->opt = list_make1(opt);
#else
grantRoleStmt->admin_opt = true;
}
#endif
}
stmts = lappend(stmts, grantRoleStmt);
}
@ -871,7 +874,13 @@ GenerateGrantRoleStmtsOfRole(Oid roleid)
grantRoleStmt->grantor = NULL;
#if PG_VERSION_NUM < PG_VERSION_16
#if PG_VERSION_NUM >= PG_VERSION_16
if (membership->admin_option)
{
DefElem *opt = makeDefElem("admin", (Node *) makeBoolean(true), -1);
grantRoleStmt->opt = list_make1(opt);
}
#else
grantRoleStmt->admin_opt = membership->admin_option;
#endif

View File

@ -42,6 +42,9 @@ typedef struct CitusVacuumParams
VacOptValue truncate;
VacOptValue index_cleanup;
int nworkers;
#if PG_VERSION_NUM >= PG_VERSION_16
int ring_size;
#endif
} CitusVacuumParams;
/* Local functions forward declarations for processing distributed table commands */
@ -318,7 +321,19 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
}
/* if no flags remain, exit early */
if (vacuumFlags == 0 &&
#if PG_VERSION_NUM >= PG_VERSION_16
if (vacuumFlags & VACOPT_PROCESS_TOAST &&
vacuumFlags & VACOPT_PROCESS_MAIN)
{
/* process toast and process main are true by default */
if (((vacuumFlags & ~VACOPT_PROCESS_TOAST) & ~VACOPT_PROCESS_MAIN) == 0 &&
vacuumParams.ring_size == -1 &&
#else
if (vacuumFlags & VACOPT_PROCESS_TOAST)
{
/* process toast is true by default */
if ((vacuumFlags & ~VACOPT_PROCESS_TOAST) == 0 &&
#endif
vacuumParams.truncate == VACOPTVALUE_UNSPECIFIED &&
vacuumParams.index_cleanup == VACOPTVALUE_UNSPECIFIED &&
vacuumParams.nworkers == VACUUM_PARALLEL_NOTSET
@ -326,6 +341,7 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
{
return vacuumPrefix->data;
}
}
/* otherwise, handle options */
appendStringInfoChar(vacuumPrefix, '(');
@ -360,11 +376,33 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
appendStringInfoString(vacuumPrefix, "SKIP_LOCKED,");
}
if (vacuumFlags & VACOPT_PROCESS_TOAST)
if (!(vacuumFlags & VACOPT_PROCESS_TOAST))
{
appendStringInfoString(vacuumPrefix, "PROCESS_TOAST,");
appendStringInfoString(vacuumPrefix, "PROCESS_TOAST FALSE,");
}
#if PG_VERSION_NUM >= PG_VERSION_16
if (!(vacuumFlags & VACOPT_PROCESS_MAIN))
{
appendStringInfoString(vacuumPrefix, "PROCESS_MAIN FALSE,");
}
if (vacuumFlags & VACOPT_SKIP_DATABASE_STATS)
{
appendStringInfoString(vacuumPrefix, "SKIP_DATABASE_STATS,");
}
if (vacuumFlags & VACOPT_ONLY_DATABASE_STATS)
{
appendStringInfoString(vacuumPrefix, "ONLY_DATABASE_STATS,");
}
if (vacuumParams.ring_size != -1)
{
appendStringInfo(vacuumPrefix, "BUFFER_USAGE_LIMIT %d,", vacuumParams.ring_size);
}
#endif
if (vacuumParams.truncate != VACOPTVALUE_UNSPECIFIED)
{
appendStringInfoString(vacuumPrefix,
@ -499,7 +537,14 @@ VacuumStmtParams(VacuumStmt *vacstmt)
bool freeze = false;
bool full = false;
bool disable_page_skipping = false;
bool process_toast = false;
bool process_toast = true;
#if PG_VERSION_NUM >= PG_VERSION_16
bool process_main = true;
bool skip_database_stats = false;
bool only_database_stats = false;
params.ring_size = -1;
#endif
/* Set default value */
params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
@ -519,6 +564,13 @@ VacuumStmtParams(VacuumStmt *vacstmt)
{
skip_locked = defGetBoolean(opt);
}
#if PG_VERSION_NUM >= PG_VERSION_16
else if (strcmp(opt->defname, "buffer_usage_limit") == 0)
{
char *vac_buffer_size = defGetString(opt);
parse_int(vac_buffer_size, &params.ring_size, GUC_UNIT_KB, NULL);
}
#endif
else if (!vacstmt->is_vacuumcmd)
{
ereport(ERROR,
@ -543,6 +595,20 @@ VacuumStmtParams(VacuumStmt *vacstmt)
{
disable_page_skipping = defGetBoolean(opt);
}
#if PG_VERSION_NUM >= PG_VERSION_16
else if (strcmp(opt->defname, "process_main") == 0)
{
process_main = defGetBoolean(opt);
}
else if (strcmp(opt->defname, "skip_database_stats") == 0)
{
skip_database_stats = defGetBoolean(opt);
}
else if (strcmp(opt->defname, "only_database_stats") == 0)
{
only_database_stats = defGetBoolean(opt);
}
#endif
else if (strcmp(opt->defname, "process_toast") == 0)
{
process_toast = defGetBoolean(opt);
@ -613,6 +679,11 @@ VacuumStmtParams(VacuumStmt *vacstmt)
(analyze ? VACOPT_ANALYZE : 0) |
(freeze ? VACOPT_FREEZE : 0) |
(full ? VACOPT_FULL : 0) |
#if PG_VERSION_NUM >= PG_VERSION_16
(process_main ? VACOPT_PROCESS_MAIN : 0) |
(skip_database_stats ? VACOPT_SKIP_DATABASE_STATS : 0) |
(only_database_stats ? VACOPT_ONLY_DATABASE_STATS : 0) |
#endif
(process_toast ? VACOPT_PROCESS_TOAST : 0) |
(disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
return params;

View File

@ -17,6 +17,7 @@
#include "distributed/citus_ruleutils.h"
#include "distributed/deparser.h"
#include "distributed/listutils.h"
#include "lib/stringinfo.h"
#include "nodes/parsenodes.h"
#include "utils/builtins.h"
@ -349,7 +350,20 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt)
{
appendStringInfo(buf, "%s ", stmt->is_grant ? "GRANT" : "REVOKE");
#if PG_VERSION_NUM < PG_VERSION_16
#if PG_VERSION_NUM >= PG_VERSION_16
if (!stmt->is_grant)
{
DefElem *opt = NULL;
foreach_ptr(opt, stmt->opt)
{
if (strcmp(opt->defname, "admin") == 0)
{
appendStringInfo(buf, "ADMIN OPTION FOR ");
break;
}
}
}
#else
if (!stmt->is_grant && stmt->admin_opt)
{
appendStringInfo(buf, "ADMIN OPTION FOR ");
@ -364,7 +378,17 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt)
if (stmt->is_grant)
{
#if PG_VERSION_NUM < PG_VERSION_16
#if PG_VERSION_NUM >= PG_VERSION_16
DefElem *opt = NULL;
foreach_ptr(opt, stmt->opt)
{
if (strcmp(opt->defname, "admin") == 0)
{
appendStringInfo(buf, " WITH ADMIN OPTION");
break;
}
}
#else
if (stmt->admin_opt)
{
appendStringInfo(buf, " WITH ADMIN OPTION");

View File

@ -1022,13 +1022,12 @@ GetUndistributableDependency(const ObjectAddress *objectAddress)
if (!SupportedDependencyByCitus(dependency))
{
/*
* Skip roles and text search templates.
*
* Roles should be handled manually with Citus community whereas text search
* templates should be handled manually in both community and enterprise
* Since we do not yet support distributed TS TEMPLATE and AM objects, we skip
* dependency checks for text search templates. The user is expected to
* manually create the TS TEMPLATE and AM objects.
*/
if (getObjectClass(dependency) != OCLASS_ROLE &&
getObjectClass(dependency) != OCLASS_TSTEMPLATE)
if (getObjectClass(dependency) != OCLASS_TSTEMPLATE &&
getObjectClass(dependency) != OCLASS_AM)
{
return dependency;
}

View File

@ -358,6 +358,11 @@ ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte)
subquery->jointree = joinTree;
rte->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
/* no permission checking for this RTE */
rte->perminfoindex = 0;
#endif
rte->subquery = subquery;
rte->alias = copyObject(rte->eref);
}

View File

@ -56,6 +56,9 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "nodes/pg_list.h"
#if PG_VERSION_NUM >= PG_VERSION_16
#include "parser/parse_relation.h"
#endif
#include "parser/parsetree.h"
#include "parser/parse_type.h"
#include "optimizer/optimizer.h"
@ -108,6 +111,7 @@ static int AssignRTEIdentities(List *rangeTableList, int rteIdCounter);
static void AssignRTEIdentity(RangeTblEntry *rangeTableEntry, int rteIdentifier);
static void AdjustPartitioningForDistributedPlanning(List *rangeTableList,
bool setPartitionedTablesInherited);
static bool RTEWentThroughAdjustPartitioning(RangeTblEntry *rangeTableEntry);
static PlannedStmt * FinalizeNonRouterPlan(PlannedStmt *localPlan,
DistributedPlan *distributedPlan,
CustomScan *customScan);
@ -144,6 +148,8 @@ static void WarnIfListHasForeignDistributedTable(List *rangeTableList);
static RouterPlanType GetRouterPlanType(Query *query,
Query *originalQuery,
bool hasUnresolvedParams);
static void ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan,
PlannedStmt *concatPlan);
/* Distributed planner hook */
@ -494,6 +500,20 @@ AdjustPartitioningForDistributedPlanning(List *rangeTableList,
}
/*
 * RTEWentThroughAdjustPartitioning reports whether the given range table
 * entry was rewritten by AdjustPartitioningForDistributedPlanning(): that
 * function clears the "inh" flag on relation RTEs that point to
 * partitioned tables, which is exactly the combination tested here.
 */
static bool
RTEWentThroughAdjustPartitioning(RangeTblEntry *rangeTableEntry)
{
	if (rangeTableEntry->rtekind != RTE_RELATION)
	{
		/* only relation RTEs are touched by the partitioning adjustment */
		return false;
	}

	return PartitionedTable(rangeTableEntry->relid) && !rangeTableEntry->inh;
}
/*
* AssignRTEIdentity assigns the given rteIdentifier to the given range table
* entry.
@ -1066,6 +1086,11 @@ CreateDistributedPlan(uint64 planId, bool allowRecursivePlanning, Query *origina
/*
 * Plan subqueries and CTEs that cannot be pushed down by recursively
 * calling the planner, and return the resulting plans to subPlanList.
 * Note that GenerateSubplansForSubqueriesAndCTEs will reset the
 * perminfoindexes for some RTEs in the originalQuery->rtable list, while
 * not changing originalQuery->rteperminfos. That's fine because we will go
 * through standard_planner again, which will adjust things accordingly in
 * set_plan_references -> add_rtes_to_flat_rtable -> add_rte_to_flat_rtable.
 */
List *subPlanList = GenerateSubplansForSubqueriesAndCTEs(planId, originalQuery,
plannerRestrictionContext);
@ -1465,12 +1490,42 @@ FinalizeNonRouterPlan(PlannedStmt *localPlan, DistributedPlan *distributedPlan,
finalPlan->utilityStmt = localPlan->utilityStmt;
/* add original range table list for access permission checks */
finalPlan->rtable = list_concat(finalPlan->rtable, localPlan->rtable);
ConcatenateRTablesAndPerminfos(finalPlan, localPlan);
return finalPlan;
}
/*
 * ConcatenateRTablesAndPerminfos appends concatPlan's range table list to
 * mainPlan's range table list. On PG16+, it also appends concatPlan's
 * RTEPermissionInfo list, shifting every non-zero perminfoindex in the
 * appended RTEs so that the indexes keep pointing at the correct entries
 * of the merged permInfos list.
 */
static void
ConcatenateRTablesAndPerminfos(PlannedStmt *mainPlan, PlannedStmt *concatPlan)
{
	mainPlan->rtable = list_concat(mainPlan->rtable, concatPlan->rtable);
#if PG_VERSION_NUM >= PG_VERSION_16

	/*
	 * The offset is the number of perminfos mainPlan already holds, which
	 * by construction is also the highest perminfoindex currently in use
	 * by mainPlan's RTEs.
	 */
	int perminfoOffset = list_length(mainPlan->permInfos);

	ListCell *rteCell = NULL;
	foreach(rteCell, concatPlan->rtable)
	{
		RangeTblEntry *appendedRte = (RangeTblEntry *) lfirst(rteCell);

		/* a perminfoindex of 0 means "no permission checks for this RTE" */
		if (appendedRte->perminfoindex != 0)
		{
			appendedRte->perminfoindex += perminfoOffset;
		}
	}

	/* with the indexes shifted, the perminfo lists can simply be concatenated */
	mainPlan->permInfos = list_concat(mainPlan->permInfos, concatPlan->permInfos);
#endif
}
/*
* FinalizeRouterPlan gets a CustomScan node which already wrapped distributed
* part of a router plan and sets it as the direct child of the router plan
@ -1502,7 +1557,7 @@ FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan)
routerPlan->rtable = list_make1(remoteScanRangeTableEntry);
/* add original range table list for access permission checks */
routerPlan->rtable = list_concat(routerPlan->rtable, localPlan->rtable);
ConcatenateRTablesAndPerminfos(routerPlan, localPlan);
routerPlan->canSetTag = true;
routerPlan->relationOids = NIL;
@ -1976,6 +2031,62 @@ multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
}
/*
 * multi_get_relation_info_hook modifies the relation's indexlist
 * if necessary, to avoid a crash in PG16 caused by our
 * Citus function AdjustPartitioningForDistributedPlanning().
 *
 * AdjustPartitioningForDistributedPlanning() is a hack that we use
 * to prevent Postgres' standard_planner() from expanding all the partitions
 * during distributed planning when a distributed partitioned table
 * is queried. It is required for both correctness and performance
 * reasons. Although we could eliminate the use of the function for
 * correctness (e.g., make sure that the rest of the planner can handle
 * partitions), its performance implications are hard to avoid. Certain
 * planning logic of Citus (such as router or query pushdown) relies
 * heavily on the relationRestrictionList. If
 * AdjustPartitioningForDistributedPlanning() is removed, all the
 * partitions show up in the relationRestrictionList, causing high
 * planning times for such queries.
 */
void
multi_get_relation_info_hook(PlannerInfo *root, Oid relationObjectId, bool inhparent,
							 RelOptInfo *rel)
{
	/* bail out early when the Citus extension is not loaded in this database */
	if (!CitusHasBeenLoaded())
	{
		return;
	}

	/* look up the RTE backing this RelOptInfo in the planner's range table */
	Index varno = rel->relid;
	RangeTblEntry *rangeTableEntry = planner_rt_fetch(varno, root);

	if (RTEWentThroughAdjustPartitioning(rangeTableEntry))
	{
		/*
		 * Walk the index list and drop partitioned indexes in place;
		 * foreach_delete_current keeps the iteration valid across deletions.
		 */
		ListCell *lc = NULL;
		foreach(lc, rel->indexlist)
		{
			IndexOptInfo *indexOptInfo = (IndexOptInfo *) lfirst(lc);
			if (get_rel_relkind(indexOptInfo->indexoid) == RELKIND_PARTITIONED_INDEX)
			{
				/*
				 * Normally, we should not need this. However, the combination of
				 * Postgres commit 3c569049b7b502bb4952483d19ce622ff0af5fd6 and
				 * Citus function AdjustPartitioningForDistributedPlanning()
				 * forces us to do this. The commit expects partitioned indexes
				 * to belong to relations with "inh" flag set properly. Whereas, the
				 * function overrides "inh" flag. To avoid a crash,
				 * we go over the list of indexinfos and remove all partitioned indexes.
				 * Partitioned indexes were ignored pre PG16 anyway, we are essentially
				 * not breaking any logic.
				 */
				rel->indexlist = foreach_delete_current(rel->indexlist, lc);
			}
		}
	}
}
/*
* TranslatedVars deep copies the translated vars for the given relation index
* if there is any append rel list.

View File

@ -136,6 +136,9 @@ GeneratePlaceHolderPlannedStmt(Query *parse)
result->stmt_len = parse->stmt_len;
result->rtable = copyObject(parse->rtable);
#if PG_VERSION_NUM >= PG_VERSION_16
result->permInfos = copyObject(parse->rteperminfos);
#endif
result->planTree = (Plan *) plan;
result->hasReturning = (parse->returningList != NIL);

View File

@ -604,6 +604,22 @@ CreateCombineQueryForRouterPlan(DistributedPlan *distPlan)
combineQuery->querySource = QSRC_ORIGINAL;
combineQuery->canSetTag = true;
combineQuery->rtable = list_make1(rangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
/*
 * This part of the code is more of a sanity check for readability;
 * it doesn't really do anything.
 * We know that only relation RTEs, and subquery RTEs that were once relation
 * RTEs (views), have their perminfoindex set (see the ExecCheckPermissions
 * function). DerivedRangeTableEntry sets the rtekind to RTE_FUNCTION,
 * hence we should have no perminfos here.
 */
Assert(rangeTableEntry->rtekind == RTE_FUNCTION &&
rangeTableEntry->perminfoindex == 0);
combineQuery->rteperminfos = NIL;
#endif
combineQuery->targetList = targetList;
combineQuery->jointree = joinTree;
return combineQuery;
@ -1533,6 +1549,20 @@ WrapSubquery(Query *subquery)
selectAlias, false, true));
outerQuery->rtable = list_make1(newRangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
/*
* This part of the code is more of a sanity check for readability,
* it doesn't really do anything.
* addRangeTableEntryForSubquery doesn't add permission info
* because the range table is set to be RTE_SUBQUERY.
* Hence we should also have no perminfos here.
*/
Assert(newRangeTableEntry->rtekind == RTE_SUBQUERY &&
newRangeTableEntry->perminfoindex == 0);
outerQuery->rteperminfos = NIL;
#endif
/* set the FROM expression to the subquery */
RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
newRangeTableRef->rtindex = 1;

View File

@ -107,6 +107,7 @@
#include "optimizer/optimizer.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
@ -136,6 +137,9 @@ typedef struct RangeTableEntryDetails
RangeTblEntry *rangeTableEntry;
List *requiredAttributeNumbers;
bool hasConstantFilterOnUniqueColumn;
#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *perminfo;
#endif
} RangeTableEntryDetails;
/*
@ -176,7 +180,8 @@ static bool HasConstantFilterOnUniqueColumn(RangeTblEntry *rangeTableEntry,
static ConversionCandidates * CreateConversionCandidates(PlannerRestrictionContext *
plannerRestrictionContext,
List *rangeTableList,
int resultRTEIdentity);
int resultRTEIdentity,
List *rteperminfos);
static void AppendUniqueIndexColumnsToList(Form_pg_index indexForm, List **uniqueIndexes,
int flags);
static ConversionChoice GetConversionChoice(ConversionCandidates *
@ -205,10 +210,17 @@ RecursivelyPlanLocalTableJoins(Query *query,
GetPlannerRestrictionContext(context);
List *rangeTableList = query->rtable;
#if PG_VERSION_NUM >= PG_VERSION_16
List *rteperminfos = query->rteperminfos;
#endif
int resultRTEIdentity = ResultRTEIdentity(query);
ConversionCandidates *conversionCandidates =
CreateConversionCandidates(plannerRestrictionContext,
rangeTableList, resultRTEIdentity);
#if PG_VERSION_NUM >= PG_VERSION_16
rangeTableList, resultRTEIdentity, rteperminfos);
#else
rangeTableList, resultRTEIdentity, NIL);
#endif
ConversionChoice conversionChoise =
GetConversionChoice(conversionCandidates, plannerRestrictionContext);
@ -323,7 +335,12 @@ ConvertRTEsToSubquery(List *rangeTableEntryDetailsList, RecursivePlanningContext
RangeTblEntry *rangeTableEntry = rangeTableEntryDetails->rangeTableEntry;
List *requiredAttributeNumbers = rangeTableEntryDetails->requiredAttributeNumbers;
ReplaceRTERelationWithRteSubquery(rangeTableEntry,
requiredAttributeNumbers, context);
#if PG_VERSION_NUM >= PG_VERSION_16
requiredAttributeNumbers, context,
rangeTableEntryDetails->perminfo);
#else
requiredAttributeNumbers, context, NULL);
#endif
}
}
@ -530,7 +547,9 @@ RequiredAttrNumbersForRelationInternal(Query *queryToProcess, int rteIndex)
*/
static ConversionCandidates *
CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext,
List *rangeTableList, int resultRTEIdentity)
List *rangeTableList,
int resultRTEIdentity,
List *rteperminfos)
{
ConversionCandidates *conversionCandidates =
palloc0(sizeof(ConversionCandidates));
@ -564,6 +583,14 @@ CreateConversionCandidates(PlannerRestrictionContext *plannerRestrictionContext,
RequiredAttrNumbersForRelation(rangeTableEntry, plannerRestrictionContext);
rangeTableEntryDetails->hasConstantFilterOnUniqueColumn =
HasConstantFilterOnUniqueColumn(rangeTableEntry, relationRestriction);
#if PG_VERSION_NUM >= PG_VERSION_16
rangeTableEntryDetails->perminfo = NULL;
if (rangeTableEntry->perminfoindex)
{
rangeTableEntryDetails->perminfo = getRTEPermissionInfo(rteperminfos,
rangeTableEntry);
}
#endif
bool referenceOrDistributedTable =
IsCitusTableType(rangeTableEntry->relid, REFERENCE_TABLE) ||

View File

@ -15,6 +15,7 @@
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "tcop/tcopprot.h"
#include "utils/lsyscache.h"
@ -777,6 +778,11 @@ ConvertCteRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte)
Query *cteQuery = (Query *) copyObject(sourceCte->ctequery);
sourceRte->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
/* sanity check - sourceRte was RTE_CTE previously so it should have no perminfo */
Assert(sourceRte->perminfoindex == 0);
#endif
/*
* As we are delinking the CTE from main query, we have to walk through the
@ -827,6 +833,20 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
RangeTblEntry *newRangeTableEntry = copyObject(sourceRte);
sourceResultsQuery->rtable = list_make1(newRangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
sourceResultsQuery->rteperminfos = NIL;
if (sourceRte->perminfoindex)
{
/* create permission info for newRangeTableEntry */
RTEPermissionInfo *perminfo = getRTEPermissionInfo(mergeQuery->rteperminfos,
sourceRte);
/* update the sourceResultsQuery's rteperminfos accordingly */
newRangeTableEntry->perminfoindex = 1;
sourceResultsQuery->rteperminfos = list_make1(perminfo);
}
#endif
/* set the FROM expression to the subquery */
newRangeTableRef->rtindex = SINGLE_RTE_INDEX;
sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
@ -852,6 +872,9 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte,
/* replace the function with the constructed subquery */
sourceRte->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
sourceRte->perminfoindex = 0;
#endif
sourceRte->subquery = sourceResultsQuery;
sourceRte->inh = false;
}

View File

@ -83,7 +83,16 @@ CreateColocatedJoinChecker(Query *subquery, PlannerRestrictionContext *restricti
* functions (i.e., FilterPlannerRestrictionForQuery()) rely on queries
* not relations.
*/
anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL);
#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *perminfo = NULL;
if (anchorRangeTblEntry->perminfoindex)
{
perminfo = getRTEPermissionInfo(subquery->rteperminfos, anchorRangeTblEntry);
}
anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, perminfo);
#else
anchorSubquery = WrapRteRelationIntoSubquery(anchorRangeTblEntry, NIL, NULL);
#endif
}
else if (anchorRangeTblEntry->rtekind == RTE_SUBQUERY)
{
@ -266,7 +275,9 @@ SubqueryColocated(Query *subquery, ColocatedJoinChecker *checker)
* designed for generating a stub query.
*/
Query *
WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes)
WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
List *requiredAttributes,
RTEPermissionInfo *perminfo)
{
Query *subquery = makeNode(Query);
RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
@ -277,6 +288,14 @@ WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes
RangeTblEntry *newRangeTableEntry = copyObject(rteRelation);
subquery->rtable = list_make1(newRangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
if (perminfo)
{
newRangeTableEntry->perminfoindex = 1;
subquery->rteperminfos = list_make1(perminfo);
}
#endif
/* set the FROM expression to the subquery */
newRangeTableRef = makeNode(RangeTblRef);
newRangeTableRef->rtindex = SINGLE_RTE_INDEX;

View File

@ -1915,6 +1915,9 @@ SubqueryPushdownMultiNodeTree(Query *originalQuery)
pushedDownQuery->targetList = subqueryTargetEntryList;
pushedDownQuery->jointree = copyObject(queryTree->jointree);
pushedDownQuery->rtable = copyObject(queryTree->rtable);
#if PG_VERSION_NUM >= PG_VERSION_16
pushedDownQuery->rteperminfos = copyObject(queryTree->rteperminfos);
#endif
pushedDownQuery->setOperations = copyObject(queryTree->setOperations);
pushedDownQuery->querySource = queryTree->querySource;
pushedDownQuery->hasSubLinks = queryTree->hasSubLinks;

View File

@ -80,6 +80,7 @@
#include "optimizer/optimizer.h"
#include "optimizer/planner.h"
#include "optimizer/prep.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
@ -886,8 +887,19 @@ RecursivelyPlanDistributedJoinNode(Node *node, Query *query,
List *requiredAttributes =
RequiredAttrNumbersForRelation(distributedRte, restrictionContext);
#if PG_VERSION_NUM >= PG_VERSION_16
RTEPermissionInfo *perminfo = NULL;
if (distributedRte->perminfoindex)
{
perminfo = getRTEPermissionInfo(query->rteperminfos, distributedRte);
}
ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
recursivePlanningContext);
recursivePlanningContext, perminfo);
#else
ReplaceRTERelationWithRteSubquery(distributedRte, requiredAttributes,
recursivePlanningContext, NULL);
#endif
}
else if (distributedRte->rtekind == RTE_SUBQUERY)
{
@ -1751,9 +1763,11 @@ NodeContainsSubqueryReferencingOuterQuery(Node *node)
void
ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
List *requiredAttrNumbers,
RecursivePlanningContext *context)
RecursivePlanningContext *context,
RTEPermissionInfo *perminfo)
{
Query *subquery = WrapRteRelationIntoSubquery(rangeTableEntry, requiredAttrNumbers);
Query *subquery = WrapRteRelationIntoSubquery(rangeTableEntry, requiredAttrNumbers,
perminfo);
List *outerQueryTargetList = CreateAllTargetListForRelation(rangeTableEntry->relid,
requiredAttrNumbers);
@ -1778,6 +1792,9 @@ ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
/* replace the function with the constructed subquery */
rangeTableEntry->rtekind = RTE_SUBQUERY;
#if PG_VERSION_NUM >= PG_VERSION_16
rangeTableEntry->perminfoindex = 0;
#endif
rangeTableEntry->subquery = subquery;
/*
@ -1850,6 +1867,15 @@ CreateOuterSubquery(RangeTblEntry *rangeTableEntry, List *outerSubqueryTargetLis
innerSubqueryRTE->eref->colnames = innerSubqueryColNames;
outerSubquery->rtable = list_make1(innerSubqueryRTE);
#if PG_VERSION_NUM >= PG_VERSION_16
/* sanity check */
Assert(innerSubqueryRTE->rtekind == RTE_SUBQUERY &&
innerSubqueryRTE->perminfoindex == 0);
outerSubquery->rteperminfos = NIL;
#endif
/* set the FROM expression to the subquery */
RangeTblRef *newRangeTableRef = makeNode(RangeTblRef);
newRangeTableRef->rtindex = 1;
@ -2022,6 +2048,15 @@ TransformFunctionRTE(RangeTblEntry *rangeTblEntry)
/* set the FROM expression to the subquery */
subquery->rtable = list_make1(newRangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
/* sanity check */
Assert(newRangeTableEntry->rtekind == RTE_FUNCTION &&
newRangeTableEntry->perminfoindex == 0);
subquery->rteperminfos = NIL;
#endif
newRangeTableRef->rtindex = 1;
subquery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL);
@ -2392,6 +2427,9 @@ BuildReadIntermediateResultsQuery(List *targetEntryList, List *columnAliasList,
Query *resultQuery = makeNode(Query);
resultQuery->commandType = CMD_SELECT;
resultQuery->rtable = list_make1(rangeTableEntry);
#if PG_VERSION_NUM >= PG_VERSION_16
resultQuery->rteperminfos = NIL;
#endif
resultQuery->jointree = joinTree;
resultQuery->targetList = targetList;

View File

@ -104,6 +104,7 @@
#include "replication/walsender.h"
#include "storage/ipc.h"
#include "optimizer/planner.h"
#include "optimizer/plancat.h"
#include "optimizer/paths.h"
#include "tcop/tcopprot.h"
#include "utils/guc.h"
@ -452,6 +453,7 @@ _PG_init(void)
/* register for planner hook */
set_rel_pathlist_hook = multi_relation_restriction_hook;
get_relation_info_hook = multi_get_relation_info_hook;
set_join_pathlist_hook = multi_join_restriction_hook;
ExecutorStart_hook = CitusExecutorStart;
ExecutorRun_hook = CitusExecutorRun;

View File

@ -106,6 +106,15 @@ BEGIN
-- restore citus catalog tables
--
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
-- if we are upgrading from PG14/PG15 to PG16+,
-- we need to regenerate the partkeys because they will include varnullingrels as well.
UPDATE pg_catalog.pg_dist_partition
SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name)
FROM public.pg_dist_partkeys_pre_16_upgrade
WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
DROP TABLE public.pg_dist_partkeys_pre_16_upgrade;
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;

View File

@ -106,6 +106,15 @@ BEGIN
-- restore citus catalog tables
--
INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition;
-- if we are upgrading from PG14/PG15 to PG16+,
-- we need to regenerate the partkeys because they will include varnullingrels as well.
UPDATE pg_catalog.pg_dist_partition
SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name)
FROM public.pg_dist_partkeys_pre_16_upgrade
WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid;
DROP TABLE public.pg_dist_partkeys_pre_16_upgrade;
INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard;
INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement;
INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata;

View File

@ -84,6 +84,13 @@ BEGIN
objects.colocationid
FROM pg_catalog.pg_dist_object objects,
pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
-- if we are upgrading from PG14/PG15 to PG16+,
-- we will need to regenerate the partkeys because they will include varnullingrels as well.
-- so we save the partkeys as column names here
CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_16_upgrade AS
SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varnullingrels%';
END;
$cppu$;

View File

@ -84,6 +84,13 @@ BEGIN
objects.colocationid
FROM pg_catalog.pg_dist_object objects,
pg_catalog.pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid) address;
-- if we are upgrading from PG14/PG15 to PG16+,
-- we will need to regenerate the partkeys because they will include varnullingrels as well.
-- so we save the partkeys as column names here
CREATE TABLE IF NOT EXISTS public.pg_dist_partkeys_pre_16_upgrade AS
SELECT logicalrelid, column_to_column_name(logicalrelid, partkey) as col_name
FROM pg_catalog.pg_dist_partition WHERE partkey IS NOT NULL AND partkey NOT ILIKE '%varnullingrels%';
END;
$cppu$;

View File

@ -45,9 +45,6 @@ RelationGetNamespaceName(Relation relation)
* we are dealing with GetUserId().
* Currently the following entries are filled like this:
* perminfo->checkAsUser = GetUserId();
* perminfo->selectedCols = NULL;
* perminfo->insertedCols = NULL;
* perminfo->updatedCols = NULL;
*/
RTEPermissionInfo *
GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms)
@ -57,9 +54,6 @@ GetFilledPermissionInfo(Oid relid, bool inh, AclMode requiredPerms)
perminfo->inh = inh;
perminfo->requiredPerms = requiredPerms;
perminfo->checkAsUser = GetUserId();
perminfo->selectedCols = NULL;
perminfo->insertedCols = NULL;
perminfo->updatedCols = NULL;
return perminfo;
}

View File

@ -234,6 +234,8 @@ extern List * TranslatedVarsForRteIdentity(int rteIdentity);
extern struct DistributedPlan * GetDistributedPlan(CustomScan *node);
extern void multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo,
Index restrictionIndex, RangeTblEntry *rte);
extern void multi_get_relation_info_hook(PlannerInfo *root, Oid relationObjectId, bool
inhparent, RelOptInfo *rel);
extern void multi_join_restriction_hook(PlannerInfo *root,
RelOptInfo *joinrel,
RelOptInfo *outerrel,

View File

@ -35,7 +35,8 @@ extern ColocatedJoinChecker CreateColocatedJoinChecker(Query *subquery,
restrictionContext);
extern bool SubqueryColocated(Query *subquery, ColocatedJoinChecker *context);
extern Query * WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation,
List *requiredAttributes);
List *requiredAttributes,
RTEPermissionInfo *perminfo);
extern List * CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes);
#endif /* QUERY_COLOCATION_CHECKER_H */

View File

@ -42,7 +42,8 @@ extern bool GeneratingSubplans(void);
extern bool ContainsLocalTableDistributedTableJoin(List *rangeTableList);
extern void ReplaceRTERelationWithRteSubquery(RangeTblEntry *rangeTableEntry,
List *requiredAttrNumbers,
RecursivePlanningContext *context);
RecursivePlanningContext *context,
RTEPermissionInfo *perminfo);
extern bool IsRecursivelyPlannableRelation(RangeTblEntry *rangeTableEntry);
extern bool IsRelationLocalTableOrMatView(Oid relationId);
extern bool ContainsReferencesToOuterQuery(Query *query);

View File

@ -144,6 +144,13 @@ object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode)
typedef bool TU_UpdateIndexes;
/*
* we define RTEPermissionInfo for PG16 compatibility
* There are some functions that need to include RTEPermissionInfo in their signature
* for PG14/PG15 we pass a NULL argument in these functions
*/
typedef RangeTblEntry RTEPermissionInfo;
#endif
#if PG_VERSION_NUM >= PG_VERSION_15

View File

@ -21,7 +21,6 @@
"sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9",
"sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214"
],
"markers": "python_version >= '3.6'",
"version": "==3.4.1"
},
"blinker": {
@ -119,11 +118,10 @@
},
"certifi": {
"hashes": [
"sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3",
"sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"
"sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
"sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
],
"markers": "python_version >= '3.6'",
"version": "==2022.12.7"
"version": "==2023.7.22"
},
"cffi": {
"hashes": [
@ -199,7 +197,6 @@
"sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1",
"sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"
],
"markers": "python_version >= '3.6'",
"version": "==8.0.4"
},
"construct": {
@ -211,28 +208,32 @@
},
"cryptography": {
"hashes": [
"sha256:05dc219433b14046c476f6f09d7636b92a1c3e5808b9a6536adf4932b3b2c440",
"sha256:0dcca15d3a19a66e63662dc8d30f8036b07be851a8680eda92d079868f106288",
"sha256:142bae539ef28a1c76794cca7f49729e7c54423f615cfd9b0b1fa90ebe53244b",
"sha256:3daf9b114213f8ba460b829a02896789751626a2a4e7a43a28ee77c04b5e4958",
"sha256:48f388d0d153350f378c7f7b41497a54ff1513c816bcbbcafe5b829e59b9ce5b",
"sha256:4df2af28d7bedc84fe45bd49bc35d710aede676e2a4cb7fc6d103a2adc8afe4d",
"sha256:4f01c9863da784558165f5d4d916093737a75203a5c5286fde60e503e4276c7a",
"sha256:7a38250f433cd41df7fcb763caa3ee9362777fdb4dc642b9a349721d2bf47404",
"sha256:8f79b5ff5ad9d3218afb1e7e20ea74da5f76943ee5edb7f76e56ec5161ec782b",
"sha256:956ba8701b4ffe91ba59665ed170a2ebbdc6fc0e40de5f6059195d9f2b33ca0e",
"sha256:a04386fb7bc85fab9cd51b6308633a3c271e3d0d3eae917eebab2fac6219b6d2",
"sha256:a95f4802d49faa6a674242e25bfeea6fc2acd915b5e5e29ac90a32b1139cae1c",
"sha256:adc0d980fd2760c9e5de537c28935cc32b9353baaf28e0814df417619c6c8c3b",
"sha256:aecbb1592b0188e030cb01f82d12556cf72e218280f621deed7d806afd2113f9",
"sha256:b12794f01d4cacfbd3177b9042198f3af1c856eedd0a98f10f141385c809a14b",
"sha256:c0764e72b36a3dc065c155e5b22f93df465da9c39af65516fe04ed3c68c92636",
"sha256:c33c0d32b8594fa647d2e01dbccc303478e16fdd7cf98652d5b3ed11aa5e5c99",
"sha256:cbaba590180cba88cb99a5f76f90808a624f18b169b90a4abb40c1fd8c19420e",
"sha256:d5a1bd0e9e2031465761dfa920c16b0065ad77321d8a8c1f5ee331021fda65e9"
"sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306",
"sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84",
"sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47",
"sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d",
"sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116",
"sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207",
"sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81",
"sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087",
"sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd",
"sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507",
"sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858",
"sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae",
"sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34",
"sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906",
"sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd",
"sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922",
"sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7",
"sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4",
"sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574",
"sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1",
"sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c",
"sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e",
"sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"
],
"index": "pypi",
"version": "==40.0.2"
"version": "==41.0.3"
},
"docopt": {
"hashes": [
@ -243,34 +244,32 @@
},
"exceptiongroup": {
"hashes": [
"sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e",
"sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"
"sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9",
"sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"
],
"markers": "python_version < '3.11'",
"version": "==1.1.1"
"version": "==1.1.3"
},
"execnet": {
"hashes": [
"sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5",
"sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"
"sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41",
"sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
"version": "==1.9.0"
"version": "==2.0.2"
},
"filelock": {
"hashes": [
"sha256:ad98852315c2ab702aeb628412cbf7e95b7ce8c3bf9565670b4eaecf1db370a9",
"sha256:fc03ae43288c013d2ea83c8597001b1129db351aad9c57fe2409327916b8e718"
"sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81",
"sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"
],
"index": "pypi",
"version": "==3.12.0"
"version": "==3.12.2"
},
"flask": {
"hashes": [
"sha256:59da8a3170004800a2837844bfa84d49b022550616070f7cb1a659682b2e7c9f",
"sha256:e1120c228ca2f553b470df4a5fa927ab66258467526069981b3eb0a91902687d"
],
"markers": "python_version >= '3.6'",
"version": "==2.0.3"
},
"h11": {
@ -278,7 +277,6 @@
"sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6",
"sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042"
],
"markers": "python_version >= '3.6'",
"version": "==0.12.0"
},
"h2": {
@ -286,7 +284,6 @@
"sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d",
"sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"
],
"markers": "python_full_version >= '3.6.1'",
"version": "==4.1.0"
},
"hpack": {
@ -294,7 +291,6 @@
"sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c",
"sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"
],
"markers": "python_full_version >= '3.6.1'",
"version": "==4.0.0"
},
"hyperframe": {
@ -302,7 +298,6 @@
"sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15",
"sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"
],
"markers": "python_full_version >= '3.6.1'",
"version": "==6.0.1"
},
"iniconfig": {
@ -310,7 +305,6 @@
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3",
"sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"
],
"markers": "python_version >= '3.7'",
"version": "==2.0.0"
},
"itsdangerous": {
@ -318,7 +312,6 @@
"sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44",
"sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"
],
"markers": "python_version >= '3.7'",
"version": "==2.1.2"
},
"jinja2": {
@ -326,7 +319,6 @@
"sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852",
"sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"
],
"markers": "python_version >= '3.7'",
"version": "==3.1.2"
},
"kaitaistruct": {
@ -347,59 +339,58 @@
},
"markupsafe": {
"hashes": [
"sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed",
"sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc",
"sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2",
"sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460",
"sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7",
"sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0",
"sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1",
"sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa",
"sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03",
"sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323",
"sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65",
"sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013",
"sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036",
"sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f",
"sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4",
"sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419",
"sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2",
"sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619",
"sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a",
"sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a",
"sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd",
"sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7",
"sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666",
"sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65",
"sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859",
"sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625",
"sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff",
"sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156",
"sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd",
"sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba",
"sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f",
"sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1",
"sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094",
"sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a",
"sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513",
"sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed",
"sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d",
"sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3",
"sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147",
"sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c",
"sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603",
"sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601",
"sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a",
"sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1",
"sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d",
"sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3",
"sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54",
"sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2",
"sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6",
"sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"
"sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e",
"sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e",
"sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431",
"sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686",
"sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559",
"sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc",
"sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c",
"sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0",
"sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4",
"sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9",
"sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575",
"sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba",
"sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d",
"sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3",
"sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00",
"sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155",
"sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac",
"sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52",
"sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f",
"sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8",
"sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b",
"sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24",
"sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea",
"sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198",
"sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0",
"sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee",
"sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be",
"sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2",
"sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707",
"sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6",
"sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58",
"sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779",
"sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636",
"sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c",
"sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad",
"sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee",
"sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc",
"sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2",
"sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48",
"sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7",
"sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e",
"sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b",
"sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa",
"sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5",
"sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e",
"sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb",
"sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9",
"sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57",
"sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc",
"sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"
],
"markers": "python_version >= '3.7'",
"version": "==2.1.2"
"version": "==2.1.3"
},
"mitmproxy": {
"editable": true,
@ -479,7 +470,6 @@
"sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
"sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
],
"markers": "python_version >= '3.7'",
"version": "==23.1"
},
"passlib": {
@ -491,11 +481,10 @@
},
"pluggy": {
"hashes": [
"sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159",
"sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"
"sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849",
"sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"
],
"markers": "python_version >= '3.6'",
"version": "==1.0.0"
"version": "==1.2.0"
},
"protobuf": {
"hashes": [
@ -521,16 +510,15 @@
"sha256:e68ad00695547d9397dd14abd3efba23cb31cef67228f4512d41396971889812",
"sha256:e9bffd52d6ee039a1cafb72475b2900c6fd0f0dca667fb7a09af0a3e119e78cb"
],
"markers": "python_version >= '3.5'",
"version": "==3.18.3"
},
"psycopg": {
"hashes": [
"sha256:59b4a71536b146925513c0234dfd1dc42b81e65d56ce5335dff4813434dbc113",
"sha256:b1500c42063abaa01d30b056f0b300826b8dd8d586900586029a294ce74af327"
"sha256:15b25741494344c24066dc2479b0f383dd1b82fa5e75612fa4fa5bb30726e9b6",
"sha256:8bbeddae5075c7890b2fa3e3553440376d3c5e28418335dee3c3656b06fa2b52"
],
"index": "pypi",
"version": "==3.1.8"
"version": "==3.1.10"
},
"publicsuffix2": {
"hashes": [
@ -544,7 +532,6 @@
"sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57",
"sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
"version": "==0.5.0"
},
"pycparser": {
@ -556,18 +543,16 @@
},
"pyopenssl": {
"hashes": [
"sha256:841498b9bec61623b1b6c47ebbc02367c07d60e0e195f19790817f10cc8db0b7",
"sha256:9e0c526404a210df9d2b18cd33364beadb0dc858a739b885677bc65e105d4a4c"
"sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2",
"sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"
],
"markers": "python_version >= '3.6'",
"version": "==23.1.1"
"version": "==23.2.0"
},
"pyparsing": {
"hashes": [
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
],
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'",
"version": "==2.4.7"
},
"pyperclip": {
@ -578,19 +563,19 @@
},
"pytest": {
"hashes": [
"sha256:3799fa815351fea3a5e96ac7e503a96fa51cc9942c3753cda7651b93c1cfa362",
"sha256:434afafd78b1d78ed0addf160ad2b77a30d35d4bdf8af234fe621919d9ed15e3"
"sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32",
"sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"
],
"index": "pypi",
"version": "==7.3.1"
"version": "==7.4.0"
},
"pytest-asyncio": {
"hashes": [
"sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b",
"sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c"
"sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d",
"sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"
],
"index": "pypi",
"version": "==0.21.0"
"version": "==0.21.1"
},
"pytest-repeat": {
"hashes": [
@ -610,64 +595,63 @@
},
"pytest-xdist": {
"hashes": [
"sha256:1849bd98d8b242b948e472db7478e090bf3361912a8fed87992ed94085f54727",
"sha256:37290d161638a20b672401deef1cba812d110ac27e35d213f091d15b8beb40c9"
"sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93",
"sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2"
],
"index": "pypi",
"version": "==3.2.1"
"version": "==3.3.1"
},
"pyyaml": {
"hashes": [
"sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf",
"sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293",
"sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b",
"sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57",
"sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b",
"sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4",
"sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07",
"sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba",
"sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9",
"sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287",
"sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513",
"sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0",
"sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782",
"sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0",
"sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92",
"sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f",
"sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2",
"sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc",
"sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1",
"sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c",
"sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86",
"sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4",
"sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c",
"sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34",
"sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b",
"sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d",
"sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c",
"sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb",
"sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7",
"sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737",
"sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3",
"sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d",
"sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358",
"sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53",
"sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78",
"sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803",
"sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a",
"sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f",
"sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174",
"sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"
"sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc",
"sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741",
"sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206",
"sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27",
"sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595",
"sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62",
"sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98",
"sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696",
"sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d",
"sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867",
"sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47",
"sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486",
"sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6",
"sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3",
"sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007",
"sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938",
"sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c",
"sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735",
"sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d",
"sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba",
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8",
"sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5",
"sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd",
"sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3",
"sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0",
"sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515",
"sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c",
"sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c",
"sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924",
"sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34",
"sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43",
"sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859",
"sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673",
"sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a",
"sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab",
"sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa",
"sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c",
"sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585",
"sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d",
"sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"
],
"index": "pypi",
"version": "==6.0"
"version": "==6.0.1"
},
"ruamel.yaml": {
"hashes": [
"sha256:1a771fc92d3823682b7f0893ad56cb5a5c87c48e62b5399d6f42c8759a583b33",
"sha256:ea21da1198c4b41b8e7a259301cc9710d3b972bf8ba52f06218478e6802dd1f1"
],
"markers": "python_version >= '3'",
"version": "==0.17.16"
},
"ruamel.yaml.clib": {
@ -675,6 +659,7 @@
"sha256:045e0626baf1c52e5527bd5db361bc83180faaba2ff586e763d3d5982a876a9e",
"sha256:15910ef4f3e537eea7fe45f8a5d19997479940d9196f357152a09031c5be59f3",
"sha256:184faeaec61dbaa3cace407cffc5819f7b977e75360e8d5ca19461cd851a5fc5",
"sha256:1a6391a7cabb7641c32517539ca42cf84b87b667bad38b78d4d42dd23e957c81",
"sha256:1f08fd5a2bea9c4180db71678e850b995d2a5f4537be0e94557668cf0f5f9497",
"sha256:2aa261c29a5545adfef9296b7e33941f46aa5bbd21164228e833412af4c9c75f",
"sha256:3110a99e0f94a4a3470ff67fc20d3f96c25b13d24c6980ff841e82bafe827cac",
@ -685,13 +670,13 @@
"sha256:4a4d8d417868d68b979076a9be6a38c676eca060785abaa6709c7b31593c35d1",
"sha256:4b3a93bb9bc662fc1f99c5c3ea8e623d8b23ad22f861eb6fce9377ac07ad6072",
"sha256:5bc0667c1eb8f83a3752b71b9c4ba55ef7c7058ae57022dd9b29065186a113d9",
"sha256:721bc4ba4525f53f6a611ec0967bdcee61b31df5a56801281027a3a6d1c2daf5",
"sha256:763d65baa3b952479c4e972669f679fe490eee058d5aa85da483ebae2009d231",
"sha256:7bdb4c06b063f6fd55e472e201317a3bb6cdeeee5d5a38512ea5c01e1acbdd93",
"sha256:8831a2cedcd0f0927f788c5bdf6567d9dc9cc235646a434986a852af1cb54b4b",
"sha256:91a789b4aa0097b78c93e3dc4b40040ba55bef518f84a40d4442f713b4094acb",
"sha256:92460ce908546ab69770b2e576e4f99fbb4ce6ab4b245345a3869a0a0410488f",
"sha256:99e77daab5d13a48a4054803d052ff40780278240a902b880dd37a51ba01a307",
"sha256:9c7617df90c1365638916b98cdd9be833d31d337dbcd722485597b43c4a215bf",
"sha256:a234a20ae07e8469da311e182e70ef6b199d0fbeb6c6cc2901204dd87fb867e8",
"sha256:a7b301ff08055d73223058b5c46c55638917f04d21577c95e00e0c4d79201a6b",
"sha256:be2a7ad8fd8f7442b24323d24ba0b56c51219513cfa45b9ada3b87b76c374d4b",
@ -729,28 +714,26 @@
},
"tornado": {
"hashes": [
"sha256:4546003dc8b5733489139d3bff5fa6a0211be505faf819bd9970e7c2b32e8122",
"sha256:4d349846931557b7ec92f224b5d598b160e2ba26ae1812480b42e9622c884bf7",
"sha256:6164571f5b9f73143d1334df4584cb9ac86d20c461e17b6c189a19ead8bb93c1",
"sha256:6cfff1e9c15c79e106b8352269d201f8fc0815914a6260f3893ca18b724ea94b",
"sha256:720f53e6367b38190ae7fa398c25c086c69d88b3c6535bd6021a126b727fb5cd",
"sha256:912df5712024564e362ecce43c8d5862e14c78c8dd3846c9d889d44fbd7f4951",
"sha256:c37b6a384d54ce6a31168d40ab21ad2591ddaf34973075cc0cad154402ecd9e8",
"sha256:c659ab04d5aa477dbe44152c67d93f3ad3243b992d94f795ca1d5c73c37337ce",
"sha256:c9114a61a4588c09065b9996ae05462350d17160b92b9bf9a1e93689cc0424dc",
"sha256:d68f3192936ff2c4add04dc21a436a43b4408d466746b78bb2b9d0a53a18683f",
"sha256:d7b737e18f701de3e4a3b0824260b4d740e4d60607b8089bb80e80ffd464780e"
"sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f",
"sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5",
"sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d",
"sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3",
"sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2",
"sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a",
"sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16",
"sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a",
"sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17",
"sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0",
"sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"
],
"markers": "python_version >= '3.8'",
"version": "==6.3"
"version": "==6.3.3"
},
"typing-extensions": {
"hashes": [
"sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb",
"sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"
"sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36",
"sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"
],
"markers": "python_version >= '3.7'",
"version": "==4.5.0"
"version": "==4.7.1"
},
"urwid": {
"hashes": [
@ -760,18 +743,16 @@
},
"werkzeug": {
"hashes": [
"sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe",
"sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"
"sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8",
"sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528"
],
"markers": "python_version >= '3.7'",
"version": "==2.2.3"
"version": "==2.3.7"
},
"wsproto": {
"hashes": [
"sha256:868776f8456997ad0d9720f7322b746bbe9193751b5b290b7f924659377c8c38",
"sha256:d8345d1808dd599b5ffb352c25a367adb6157e664e140dbecba3f9bc007edb9f"
],
"markers": "python_full_version >= '3.6.1'",
"version": "==1.0.0"
},
"zstandard": {
@ -825,7 +806,6 @@
"sha256:f98fc5750aac2d63d482909184aac72a979bfd123b112ec53fd365104ea15b1c",
"sha256:ff5b75f94101beaa373f1511319580a010f6e03458ee51b1a386d7de5331440a"
],
"markers": "python_version >= '3.5'",
"version": "==0.15.2"
}
},
@ -835,63 +815,58 @@
"sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04",
"sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"
],
"markers": "python_version >= '3.7'",
"version": "==23.1.0"
},
"black": {
"hashes": [
"sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5",
"sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915",
"sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326",
"sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940",
"sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b",
"sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30",
"sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c",
"sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c",
"sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab",
"sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27",
"sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2",
"sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961",
"sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9",
"sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb",
"sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70",
"sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331",
"sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2",
"sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266",
"sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d",
"sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6",
"sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b",
"sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925",
"sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8",
"sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4",
"sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"
"sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3",
"sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb",
"sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087",
"sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320",
"sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6",
"sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3",
"sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc",
"sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f",
"sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587",
"sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91",
"sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a",
"sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad",
"sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926",
"sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9",
"sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be",
"sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd",
"sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96",
"sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491",
"sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2",
"sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a",
"sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f",
"sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995"
],
"index": "pypi",
"version": "==23.3.0"
"version": "==23.7.0"
},
"click": {
"hashes": [
"sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1",
"sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"
],
"markers": "python_version >= '3.6'",
"version": "==8.0.4"
},
"flake8": {
"hashes": [
"sha256:3833794e27ff64ea4e9cf5d410082a8b97ff1a06c16aa3d2027339cd0f1195c7",
"sha256:c61007e76655af75e6785a931f452915b371dc48f56efd765247c8fe68f2b181"
"sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23",
"sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"
],
"index": "pypi",
"version": "==6.0.0"
"version": "==6.1.0"
},
"flake8-bugbear": {
"hashes": [
"sha256:8a218d13abd6904800970381057ce8e72cde8eea743240c4ef3ede4dd0bc9cfb",
"sha256:ea565bdb87b96b56dc499edd6cc3ba7f695373d902a5f56c989b74fad7c7719d"
"sha256:0ebdc7d8ec1ca8bd49347694562381f099f4de2f8ec6bda7a7dca65555d9e0d4",
"sha256:d99d005114020fbef47ed5e4aebafd22f167f9a0fbd0d8bf3c9e90612cb25c34"
],
"index": "pypi",
"version": "==23.3.23"
"version": "==23.7.10"
},
"isort": {
"hashes": [
@ -906,7 +881,6 @@
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325",
"sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"
],
"markers": "python_version >= '3.6'",
"version": "==0.7.0"
},
"mypy-extensions": {
@ -914,7 +888,6 @@
"sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d",
"sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"
],
"markers": "python_version >= '3.5'",
"version": "==1.0.0"
},
"packaging": {
@ -922,40 +895,35 @@
"sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61",
"sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"
],
"markers": "python_version >= '3.7'",
"version": "==23.1"
},
"pathspec": {
"hashes": [
"sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687",
"sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"
"sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20",
"sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"
],
"markers": "python_version >= '3.7'",
"version": "==0.11.1"
"version": "==0.11.2"
},
"platformdirs": {
"hashes": [
"sha256:d5b638ca397f25f979350ff789db335903d7ea010ab28903f57b27e1b16c2b08",
"sha256:ebe11c0d7a805086e99506aa331612429a72ca7cd52a1f0d277dc4adc20cb10e"
"sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d",
"sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"
],
"markers": "python_version >= '3.7'",
"version": "==3.2.0"
"version": "==3.10.0"
},
"pycodestyle": {
"hashes": [
"sha256:347187bdb476329d98f695c213d7295a846d1152ff4fe9bacb8a9590b8ee7053",
"sha256:8a4eaf0d0495c7395bdab3589ac2db602797d76207242c17d470186815706610"
"sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0",
"sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8"
],
"markers": "python_version >= '3.6'",
"version": "==2.10.0"
"version": "==2.11.0"
},
"pyflakes": {
"hashes": [
"sha256:ec55bf7fe21fff7f1ad2f7da62363d749e2a470500eab1b555334b67aa1ef8cf",
"sha256:ec8b276a6b60bd80defed25add7e439881c19e64850afd9b346283d4165fd0fd"
"sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774",
"sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"
],
"markers": "python_version >= '3.6'",
"version": "==3.0.1"
"version": "==3.1.0"
},
"tomli": {
"hashes": [
@ -967,11 +935,10 @@
},
"typing-extensions": {
"hashes": [
"sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb",
"sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"
"sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36",
"sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"
],
"markers": "python_version >= '3.7'",
"version": "==4.5.0"
"version": "==4.7.1"
}
}
}

View File

@ -301,5 +301,10 @@ s/(NOTICE: issuing CREATE EXTENSION IF NOT EXISTS citus_columnar WITH SCHEMA p
# (This is not preprocessor directive, but a reminder for the developer that will drop PG14&15 support )
s/, password_required=false//g
s/provide the file or change sslmode/provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode/g
s/(:varcollid [0-9]+) :varlevelsup 0/\1 :varnullingrels (b) :varlevelsup 0/g
s/table_name_for_view\.([_a-z0-9]+)(,| |$)/\1\2/g
s/permission denied to terminate process/must be a superuser to terminate superuser process/g
s/permission denied to cancel query/must be a superuser to cancel superuser query/g
#endif /* PG_VERSION_NUM < PG_VERSION_16 */

View File

@ -635,7 +635,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'citus_local_table_4'::regclass;
SELECT column_name_to_column('citus_local_table_4', 'a');
column_name_to_column
---------------------------------------------------------------------
{VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
{VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
(1 row)
SELECT master_update_shard_statistics(shardid)

View File

@ -769,8 +769,8 @@ SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition
ORDER BY logicalrelid;
logicalrelid | partmethod | partkey
---------------------------------------------------------------------
parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 5 :location -1}
parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 5 :location -1}
(2 rows)
-- some tests for view propagation on citus local tables
@ -912,12 +912,12 @@ select run_command_on_workers($$SELECT count(*)=0 from citus_local_tables_mx.v10
(2 rows)
CREATE TABLE loc_tb_2 (a int);
CREATE VIEW v104 AS SELECT * from loc_tb_2;
CREATE VIEW v104 AS SELECT * from loc_tb_2 table_name_for_view;
SET client_min_messages TO DEBUG1;
-- verify the CREATE command for the view is generated correctly
ALTER TABLE loc_tb_2 ADD CONSTRAINT fkey_2 FOREIGN KEY (a) references ref_tb(a);
DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v104 (a) AS SELECT loc_tb_2.a
FROM citus_local_tables_mx.loc_tb_2; ALTER VIEW citus_local_tables_mx.v104 OWNER TO postgres"
DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v104 (a) AS SELECT a
FROM citus_local_tables_mx.loc_tb_2 table_name_for_view; ALTER VIEW citus_local_tables_mx.v104 OWNER TO postgres"
DEBUG: "view v104" has dependency to "table loc_tb_2" that is not in Citus' metadata
DEBUG: validating foreign key constraint "fkey_2_1330083"
SET client_min_messages TO WARNING;

View File

@ -1,6 +1,10 @@
--
-- Test chunk filtering in columnar using min/max values in stripe skip lists.
--
-- It has an alternative test output file
-- because PG16 changed the order of some Filters in EXPLAIN
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
--
-- filtered_row_count returns number of rows filtered by the WHERE clause.
-- If chunks get filtered by columnar, less rows are passed to WHERE
@ -370,10 +374,10 @@ SELECT * FROM r1, coltest WHERE
Filter: ((n1 % 10) = 0)
Rows Removed by Filter: 1
-> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=4)
Filter: ((x1 > 15000) AND (r1.id1 = id) AND ((x1)::text > '000000'::text))
Filter: ((x1 > 15000) AND (id = r1.id1) AND ((x1)::text > '000000'::text))
Rows Removed by Filter: 999
Columnar Projected Columns: id, x1, x2, x3
Columnar Chunk Group Filters: ((x1 > 15000) AND (r1.id1 = id))
Columnar Chunk Group Filters: ((x1 > 15000) AND (id = r1.id1))
Columnar Chunk Groups Removed by Filter: 19
(10 rows)
@ -413,10 +417,10 @@ SELECT * FROM r1, r2, r3, r4, r5, r6, r7, coltest WHERE
-> Seq Scan on r2 (actual rows=5 loops=5)
-> Seq Scan on r3 (actual rows=5 loops=5)
-> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
Filter: (r1.id1 = id)
Filter: (id = r1.id1)
Rows Removed by Filter: 999
Columnar Projected Columns: id, x1, x2, x3
Columnar Chunk Group Filters: (r1.id1 = id)
Columnar Chunk Group Filters: (id = r1.id1)
Columnar Chunk Groups Removed by Filter: 19
-> Seq Scan on r4 (actual rows=1 loops=5)
-> Seq Scan on r5 (actual rows=1 loops=1)
@ -588,10 +592,10 @@ DETAIL: parameterized by rels {r3}; 2 clauses pushed down
-> Nested Loop (actual rows=3 loops=1)
-> Seq Scan on r1 (actual rows=5 loops=1)
-> Custom Scan (ColumnarScan) on coltest (actual rows=1 loops=5)
Filter: ((r1.n1 > x1) AND (r1.id1 = id))
Filter: ((r1.n1 > x1) AND (id = r1.id1))
Rows Removed by Filter: 799
Columnar Projected Columns: id, x1, x2, x3
Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1))
Columnar Chunk Groups Removed by Filter: 19
-> Seq Scan on r2 (actual rows=5 loops=3)
-> Seq Scan on r3 (actual rows=5 loops=3)
@ -618,10 +622,10 @@ SELECT * FROM r1, coltest_part WHERE
-> Seq Scan on r1 (actual rows=5 loops=1)
-> Append (actual rows=1 loops=5)
-> Custom Scan (ColumnarScan) on coltest_part0 coltest_part_1 (actual rows=1 loops=3)
Filter: ((r1.n1 > x1) AND (r1.id1 = id))
Filter: ((r1.n1 > x1) AND (id = r1.id1))
Rows Removed by Filter: 999
Columnar Projected Columns: id, x1, x2, x3
Columnar Chunk Group Filters: ((r1.n1 > x1) AND (r1.id1 = id))
Columnar Chunk Group Filters: ((r1.n1 > x1) AND (id = r1.id1))
Columnar Chunk Groups Removed by Filter: 9
-> Seq Scan on coltest_part1 coltest_part_2 (actual rows=0 loops=2)
Filter: ((r1.n1 > x1) AND (r1.id1 = id))

File diff suppressed because it is too large Load Diff

View File

@ -77,10 +77,10 @@ FROM columnar_test_helpers.columnar_store_memory_stats();
top_growth | 1
-- before this change, max mem usage while executing inserts was 28MB and
-- with this change it's less than 8MB.
-- with this change it's less than 9MB.
SELECT
(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
-[ RECORD 1 ]--+--
large_batch_ok | t
first_batch_ok | t

View File

@ -326,7 +326,7 @@ WHERE w2.a = 123;
EXPLAIN (COSTS OFF) SELECT sub_1.b, sub_2.a, sub_3.avg
FROM
(SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b HAVING count(DISTINCT a) > 0 ORDER BY 1 DESC LIMIT 5) AS sub_1,
(SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b ORDER BY 1 DESC LIMIT 5) AS sub_1,
(SELECT a FROM full_correlated WHERE (a > 10) GROUP BY a HAVING count(DISTINCT a) >= 1 ORDER BY 1 DESC LIMIT 3) AS sub_2,
(SELECT avg(a) AS AVG FROM full_correlated WHERE (a > 2) GROUP BY a HAVING sum(a) > 10 ORDER BY (sum(d) - avg(a) - COALESCE(array_upper(ARRAY[max(a)],1) * 5, 0)) DESC LIMIT 3) AS sub_3
WHERE sub_2.a < sub_1.b::integer
@ -341,11 +341,10 @@ LIMIT 100;
-> Nested Loop
Join Filter: (full_correlated_1.a < (full_correlated.b)::integer)
-> Limit
-> GroupAggregate
Group Key: full_correlated.b
Filter: (count(DISTINCT full_correlated.a) > 0)
-> Sort
Sort Key: full_correlated.b DESC
-> HashAggregate
Group Key: full_correlated.b
-> Custom Scan (ColumnarScan) on full_correlated
Filter: (a > 2)
Columnar Projected Columns: a, b
@ -366,7 +365,7 @@ LIMIT 100;
Filter: (sum(full_correlated_2.a) > 10)
-> Index Scan using full_correlated_btree on full_correlated full_correlated_2
Index Cond: (a > 2)
(32 rows)
(31 rows)
DROP INDEX full_correlated_btree;
CREATE INDEX full_correlated_hash ON full_correlated USING hash(a);

View File

@ -244,13 +244,13 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
1
(1 row)
SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
role | member | grantor | admin_option
---------------------------------------------------------------------
dist_role_1 | dist_role_2 | non_dist_role_1 | f
dist_role_3 | non_dist_role_3 | postgres | f
non_dist_role_1 | non_dist_role_2 | dist_role_1 | f
non_dist_role_4 | dist_role_4 | postgres | f
dist_role_1 | dist_role_2 | t | f
dist_role_3 | non_dist_role_3 | t | f
non_dist_role_1 | non_dist_role_2 | t | f
non_dist_role_4 | dist_role_4 | t | f
(4 rows)
SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;

View File

@ -67,12 +67,12 @@ SELECT pg_typeof(:maintenance_daemon_gpid);
bigint
(1 row)
\set VERBOSITY terse
SELECT pg_cancel_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to cancel superuser query
CONTEXT: while executing command on localhost:xxxxx
SELECT pg_terminate_backend(:maintenance_daemon_gpid);
ERROR: must be a superuser to terminate superuser process
CONTEXT: while executing command on localhost:xxxxx
\set VERBOSITY default
-- we can cancel our own backend
SELECT pg_cancel_backend(citus_backend_gpid());
ERROR: canceling statement due to user request

View File

@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
GROUP BY c1, c2, c3, c4, c5, c6
GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
GROUP BY c1, c2, c3, c4, c5, c6
GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@ -1247,7 +1247,7 @@ DO UPDATE SET
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
-> Seq Scan on source_table_4213644 source_table
(10 rows)

View File

@ -1214,7 +1214,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
GROUP BY c1, c2, c3, c4, c5, c6
GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@ -1232,7 +1232,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
GROUP BY c1, c2, c3, c4, c5, c6
GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@ -1247,7 +1247,7 @@ DO UPDATE SET
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> HashAggregate
Group Key: c1, c2, c3, c4, '-1'::double precision, insert_select_repartition.dist_func(c1, 4)
Group Key: c1, c2, c3, c4, insert_select_repartition.dist_func(c1, 4)
-> Seq Scan on source_table_4213644 source_table
(10 rows)

View File

@ -357,13 +357,13 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
101
(1 row)
CREATE VIEW local_regular_view AS SELECT * FROM local;
CREATE VIEW local_regular_view AS SELECT * FROM local table_name_for_view;
WARNING: "view local_regular_view" has dependency to "table local" that is not in Citus' metadata
DETAIL: "view local_regular_view" will be created only locally
HINT: Distribute "table local" first to distribute "view local_regular_view"
CREATE VIEW dist_regular_view AS SELECT * FROM distributed;
SELECT count(*) FROM distributed JOIN local_regular_view USING (id);
DEBUG: generating subplan XXX_1 for subquery SELECT local.id, local.title FROM local_dist_join_mixed.local
DEBUG: generating subplan XXX_1 for subquery SELECT id, title FROM local_dist_join_mixed.local table_name_for_view
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (local_dist_join_mixed.distributed JOIN (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title text)) local_regular_view USING (id))
count
---------------------------------------------------------------------
@ -380,7 +380,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
(1 row)
SELECT count(*) FROM dist_regular_view JOIN local_regular_view USING (id);
DEBUG: generating subplan XXX_1 for subquery SELECT local.id, local.title FROM local_dist_join_mixed.local
DEBUG: generating subplan XXX_1 for subquery SELECT id, title FROM local_dist_join_mixed.local table_name_for_view
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT distributed.id, distributed.name, distributed.created_at FROM local_dist_join_mixed.distributed) dist_regular_view JOIN (SELECT intermediate_result.id, intermediate_result.title FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id bigint, title text)) local_regular_view USING (id))
count
---------------------------------------------------------------------

View File

@ -1370,9 +1370,6 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
where (
select true as bool from pg_catalog.pg_am limit 1
)
) as subq_1
) as subq_2;
typdefault
@ -1400,15 +1397,11 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
where (
select true as bool from pg_catalog.pg_am limit 1
)
) as subq_1
) as subq_2;
DEBUG: generating subplan XXX_1 for subquery SELECT true AS bool FROM pg_am LIMIT 1
DEBUG: Wrapping relation "custom_pg_type" to a subquery
DEBUG: generating subplan XXX_2 for subquery SELECT typdefault FROM local_table_join.custom_pg_type WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT typdefault FROM (SELECT subq_1.typdefault FROM (SELECT custom_pg_type.typdefault FROM (SELECT custom_pg_type_1.typdefault FROM (SELECT intermediate_result.typdefault FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(typdefault text)) custom_pg_type_1) custom_pg_type, LATERAL (SELECT tbl.a FROM local_table_join.tbl WHERE (custom_pg_type.typdefault OPERATOR(pg_catalog.>) 'a'::text) LIMIT 1) subq_0 WHERE (SELECT intermediate_result.bool FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(bool boolean))) subq_1) subq_2
DEBUG: generating subplan XXX_1 for subquery SELECT typdefault FROM local_table_join.custom_pg_type WHERE true
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT typdefault FROM (SELECT subq_1.typdefault FROM (SELECT custom_pg_type.typdefault FROM (SELECT custom_pg_type_1.typdefault FROM (SELECT intermediate_result.typdefault FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(typdefault text)) custom_pg_type_1) custom_pg_type, LATERAL (SELECT tbl.a FROM local_table_join.tbl WHERE (custom_pg_type.typdefault OPERATOR(pg_catalog.>) 'a'::text) LIMIT 1) subq_0) subq_1) subq_2
ERROR: cannot push down this subquery
DETAIL: Limit clause is currently unsupported when a lateral subquery references a column from complex subqueries, CTEs or local tables
-- Not supported because of 4470

View File

@ -1284,8 +1284,17 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus_internal gpid=10000000001';
-- with an ugly trick, update the vartype of table from int to bigint
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}'
-- include varnullingrels for PG16
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\else
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\endif
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
ERROR: cannot colocate tables test_2 and test_3
ROLLBACK;

View File

@ -1,6 +1,18 @@
--
-- COMPLEX_COUNT_DISTINCT
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
---------------------------------------------------------------------
t
(1 row)
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
@ -87,9 +99,12 @@ SELECT
-> GroupAggregate
Output: l_orderkey, count(DISTINCT l_partkey)
Group Key: lineitem_hash.l_orderkey
-> Index Scan Backward using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
(22 rows)
-> Sort
Output: l_orderkey, l_partkey
Sort Key: lineitem_hash.l_orderkey DESC, lineitem_hash.l_partkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey
(25 rows)
-- it is also supported if there is no grouping or grouping is on non-partition field
SELECT
@ -117,6 +132,9 @@ SELECT
Sort Key: (count(DISTINCT remote_scan.count)) DESC
-> Aggregate
Output: count(DISTINCT remote_scan.count)
-> Sort
Output: remote_scan.count
Sort Key: remote_scan.count
-> Custom Scan (Citus Adaptive)
Output: remote_scan.count
Task Count: 8
@ -129,7 +147,7 @@ SELECT
Group Key: lineitem_hash.l_partkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
(19 rows)
(22 rows)
SELECT
l_shipmode, count(DISTINCT l_partkey)
@ -167,7 +185,7 @@ SELECT
Group Key: remote_scan.l_shipmode
-> Sort
Output: remote_scan.l_shipmode, remote_scan.count
Sort Key: remote_scan.l_shipmode DESC
Sort Key: remote_scan.l_shipmode DESC, remote_scan.count
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_shipmode, remote_scan.count
Task Count: 8
@ -232,9 +250,12 @@ SELECT
-> GroupAggregate
Output: l_orderkey, count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
Group Key: lineitem_hash.l_orderkey
-> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
(22 rows)
-> Sort
Output: l_orderkey, l_partkey, l_shipmode
Sort Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey, l_shipmode
(25 rows)
-- partition/non-partition column count distinct no grouping
SELECT
@ -253,6 +274,9 @@ SELECT
---------------------------------------------------------------------
Aggregate
Output: count(DISTINCT remote_scan.count), count(DISTINCT remote_scan.count_1), count(DISTINCT remote_scan.count_2)
-> Sort
Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2
Sort Key: remote_scan.count
-> Custom Scan (Citus Adaptive)
Output: remote_scan.count, remote_scan.count_1, remote_scan.count_2
Task Count: 8
@ -265,7 +289,7 @@ SELECT
Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_partkey, lineitem_hash.l_shipmode
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
(14 rows)
(17 rows)
-- distinct/non-distinct on partition and non-partition columns
SELECT
@ -433,7 +457,7 @@ SELECT *
Group Key: lineitem_hash.l_partkey
-> Sort
Output: l_partkey, l_orderkey
Sort Key: lineitem_hash.l_partkey
Sort Key: lineitem_hash.l_partkey, lineitem_hash.l_orderkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_partkey, l_orderkey
Task Count: 1
@ -505,9 +529,12 @@ SELECT
-> GroupAggregate
Output: l_orderkey, count(DISTINCT l_suppkey) FILTER (WHERE (l_shipmode = 'AIR'::bpchar)), count(DISTINCT l_suppkey)
Group Key: lineitem_hash.l_orderkey
-> Index Scan using lineitem_hash_pkey_240000 on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment
(22 rows)
-> Sort
Output: l_orderkey, l_suppkey, l_shipmode
Sort Key: lineitem_hash.l_orderkey, lineitem_hash.l_suppkey
-> Seq Scan on public.lineitem_hash_240000 lineitem_hash
Output: l_orderkey, l_suppkey, l_shipmode
(25 rows)
-- group by on non-partition column
SELECT
@ -550,7 +577,7 @@ SELECT
Group Key: remote_scan.l_suppkey
-> Sort
Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
Sort Key: remote_scan.l_suppkey DESC
Sort Key: remote_scan.l_suppkey DESC, remote_scan.count
-> Custom Scan (Citus Adaptive)
Output: remote_scan.l_suppkey, remote_scan.count, remote_scan.count_1
Task Count: 8

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,18 @@
--
-- MULTI_EXPLAIN
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
server_version_ge_16
---------------------------------------------------------------------
t
(1 row)
SET citus.next_shard_id TO 570000;
\a\t
SET citus.explain_distributed_queries TO on;
@ -651,7 +663,7 @@ Aggregate
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Hash Join
Hash Cond: (users.composite_id = events.composite_id)
-> Seq Scan on users_1400289 users
@ -737,7 +749,7 @@ HashAggregate
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone, events.event_time
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
@ -853,7 +865,7 @@ Sort
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
Filter: (array_ndims(array_agg(('action=>1'::text) ORDER BY events.event_time)) > 0)
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.count_pay, events.event_time
-> Hash Left Join
Hash Cond: (users.composite_id = subquery_2.composite_id)
-> HashAggregate
@ -951,7 +963,7 @@ Limit
-> GroupAggregate
Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
-> Sort
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id)
Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), events.event_time
-> Nested Loop Left Join
-> Limit
-> Sort
@ -2381,11 +2393,16 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
-> Hash Join (actual rows=10 loops=1)
Hash Cond: (ref_table.a = intermediate_result.a)
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
-> Hash (actual rows=10 loops=1)
-> Merge Join (actual rows=10 loops=1)
Merge Cond: (intermediate_result.a = ref_table.a)
-> Sort (actual rows=10 loops=1)
Sort Key: intermediate_result.a
Sort Method: quicksort Memory: 25kB
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: ref_table.a
Sort Method: quicksort Memory: 25kB
-> Seq Scan on ref_table_570021 ref_table (actual rows=10 loops=1)
EXPLAIN :default_analyze_flags
SELECT count(distinct a) FROM (SELECT GREATEST(random(), 2) r, a FROM dist_table) t NATURAL JOIN ref_table;
Aggregate (actual rows=1 loops=1)
@ -2442,6 +2459,9 @@ Aggregate (actual rows=1 loops=1)
-> Aggregate (actual rows=1 loops=1)
InitPlan 1 (returns $0)
-> Function Scan on read_intermediate_result intermediate_result (actual rows=1 loops=1)
-> Sort (actual rows=4 loops=1)
Sort Key: dist_table.a
Sort Method: quicksort Memory: 25kB
-> Result (actual rows=4 loops=1)
One-Time Filter: $0
-> Seq Scan on dist_table_570017 dist_table (actual rows=4 loops=1)
@ -2486,6 +2506,9 @@ Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
Tuple data received from node: 8 bytes
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate (actual rows=1 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: intermediate_result.a2
Sort Method: quicksort Memory: 25kB
-> Function Scan on read_intermediate_result intermediate_result (actual rows=10 loops=1)
ROLLBACK;
-- https://github.com/citusdata/citus/issues/4074

File diff suppressed because it is too large Load Diff

View File

@ -986,7 +986,7 @@ DELETE FROM pg_dist_shard WHERE shardid = 1;
CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
CREATE TABLE orders_2020_07_01
PARTITION OF e_transactions FOR VALUES IN (1,2,3);
INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
SELECT
(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,
(metadata->>'partitioned_citus_table_exists_pre_11') IS NULL as is_null

View File

@ -1232,31 +1232,20 @@ WHERE o_orderkey IN (1, 2)
-> Seq Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
(13 rows)
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
SELECT count(*)
FROM orders_hash_partitioned
FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
WHERE o_orderkey IN (1, 2)
AND l_orderkey IN (2, 3);
QUERY PLAN
$Q$);
coordinator_plan
---------------------------------------------------------------------
Aggregate
-> Custom Scan (Citus Adaptive)
Task Count: 3
Tasks Shown: One of 3
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> Aggregate
-> Nested Loop
Join Filter: (orders_hash_partitioned.o_orderkey = lineitem_hash_partitioned.l_orderkey)
-> Seq Scan on orders_hash_partitioned_630000 orders_hash_partitioned
Filter: (o_orderkey = ANY ('{1,2}'::integer[]))
-> Materialize
-> Bitmap Heap Scan on lineitem_hash_partitioned_630004 lineitem_hash_partitioned
Recheck Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
-> Bitmap Index Scan on lineitem_hash_partitioned_pkey_630004
Index Cond: (l_orderkey = ANY ('{2,3}'::integer[]))
(16 rows)
(3 rows)
SET citus.task_executor_type TO DEFAULT;
DROP TABLE lineitem_hash_partitioned;

View File

@ -120,7 +120,7 @@ EXPLAIN (COSTS FALSE)
SELECT sum(l_extendedprice * l_discount) as revenue
FROM lineitem_hash, orders_hash
WHERE o_orderkey = l_orderkey
GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
ORDER BY 1 DESC LIMIT 3;
QUERY PLAN
---------------------------------------------------------------------
@ -136,7 +136,7 @@ EXPLAIN (COSTS FALSE)
-> Sort
Sort Key: (sum((lineitem_hash.l_extendedprice * lineitem_hash.l_discount))) DESC
-> HashAggregate
Group Key: lineitem_hash.l_orderkey, orders_hash.o_orderkey, lineitem_hash.l_shipmode
Group Key: lineitem_hash.l_orderkey, lineitem_hash.l_shipmode
Filter: (sum(lineitem_hash.l_quantity) > '24'::numeric)
-> Hash Join
Hash Cond: (orders_hash.o_orderkey = lineitem_hash.l_orderkey)

View File

@ -533,7 +533,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@ -672,7 +672,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;

View File

@ -533,7 +533,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;
@ -672,7 +672,7 @@ SELECT * FROM pg_dist_node ORDER BY nodeid;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 2 | s | f
(1 row)
SELECT * FROM pg_dist_shard WHERE logicalrelid::text LIKE 'mx_testing_schema%' ORDER BY shardid;

View File

@ -148,7 +148,7 @@ SELECT pg_reload_conf();
CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637'
PUBLICATION pub_01 WITH (citus_use_authinfo=true);
ERROR: could not connect to the publisher: root certificate file "/non/existing/certificate.crt" does not exist
Either provide the file or change sslmode to disable server certificate verification.
Either provide the file, use the system's trusted roots with sslrootcert=system, or change sslmode to disable server certificate verification.
ALTER SYSTEM RESET citus.node_conninfo;
SELECT pg_reload_conf();
pg_reload_conf

View File

@ -425,9 +425,25 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
test_table_2_1130000
(4 rows)
-- PG16 added one more backend type B_STANDALONE_BACKEND
-- and also alphabetized the backend types, hence the orders changed
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
SELECT 4 AS client_backend \gset
SELECT 5 AS bgworker \gset
SELECT 12 AS walsender \gset
\else
SELECT 3 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 9 AS walsender \gset
\endif
-- say, we set it to bgworker
-- the shards and indexes do not show up
SELECT set_backend_type(4);
SELECT set_backend_type(:bgworker);
NOTICE: backend type switched to: background worker
set_backend_type
---------------------------------------------------------------------
@ -445,7 +461,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
-- or, we set it to walsender
-- the shards and indexes do not show up
SELECT set_backend_type(9);
SELECT set_backend_type(:walsender);
NOTICE: backend type switched to: walsender
set_backend_type
---------------------------------------------------------------------
@ -480,7 +496,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
RESET application_name;
-- but, client backends to see the shards
SELECT set_backend_type(3);
SELECT set_backend_type(:client_backend);
NOTICE: backend type switched to: client backend
set_backend_type
---------------------------------------------------------------------

View File

@ -374,7 +374,7 @@ LIMIT 2;
(2 rows)
EXPLAIN (COSTS OFF)
SELECT ut.user_id, count(DISTINCT ut.value_2)
SELECT ut.user_id, avg(ut.value_2)
FROM users_table ut, events_table et
WHERE ut.user_id = et.user_id and et.value_2 < 5
GROUP BY ut.user_id
@ -384,7 +384,7 @@ LIMIT 5;
---------------------------------------------------------------------
Limit
-> Sort
Sort Key: remote_scan.count, remote_scan.worker_column_3, remote_scan.user_id DESC
Sort Key: remote_scan.avg, remote_scan.worker_column_3, remote_scan.user_id DESC
-> Custom Scan (Citus Adaptive)
Task Count: 4
Tasks Shown: One of 4
@ -392,16 +392,14 @@ LIMIT 5;
Node: host=localhost port=xxxxx dbname=regression
-> Limit
-> Sort
Sort Key: (count(DISTINCT ut.value_2)), (avg(ut.value_1)), ut.user_id DESC
-> GroupAggregate
Sort Key: (avg(ut.value_2)), (avg(ut.value_1)), ut.user_id DESC
-> HashAggregate
Group Key: ut.user_id
-> Sort
Sort Key: ut.user_id DESC
-> Hash Join
Hash Cond: (ut.user_id = et.user_id)
-> Seq Scan on users_table_1400256 ut
-> Hash
-> Seq Scan on events_table_1400260 et
Filter: (value_2 < 5)
(21 rows)
(19 rows)

View File

@ -86,7 +86,7 @@ SELECT prune_using_both_values('pruning', 'tomato', 'rose');
SELECT debug_equality_expression('pruning');
debug_equality_expression
---------------------------------------------------------------------
{OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
{OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1}
(1 row)
-- print the initial ordering of shard intervals

View File

@ -813,7 +813,7 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
GROUP BY l_orderkey
GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
QUERY PLAN
---------------------------------------------------------------------
@ -827,9 +827,9 @@ EXPLAIN (COSTS FALSE)
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
Group Key: l_orderkey
Group Key: l_orderkey, l_partkey, l_shipmode
-> Sort
Sort Key: l_orderkey
Sort Key: l_orderkey, l_partkey, l_shipmode
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(14 rows)
@ -839,7 +839,7 @@ SET enable_hashagg TO off;
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
GROUP BY l_orderkey
GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
QUERY PLAN
---------------------------------------------------------------------
@ -852,9 +852,9 @@ EXPLAIN (COSTS FALSE)
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
Group Key: l_orderkey
Group Key: l_orderkey, l_partkey, l_shipmode
-> Sort
Sort Key: l_orderkey
Sort Key: l_orderkey, l_partkey, l_shipmode
-> Seq Scan on lineitem_hash_part_360041 lineitem_hash_part
(13 rows)

View File

@ -1062,7 +1062,7 @@ SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value)
(26 rows)
EXPLAIN (COSTS OFF)
SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);
QUERY PLAN
---------------------------------------------------------------------
Custom Scan (Citus Adaptive)
@ -1070,20 +1070,18 @@ SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value)
Tasks Shown: All
-> Task
Node: host=localhost port=xxxxx dbname=regression
-> GroupAggregate
Group Key: k1.key
-> Aggregate
Filter: (sum(k1.value) > $0)
InitPlan 1 (returns $0)
-> Limit
-> Sort
Sort Key: (sum(k2.value)) DESC
-> GroupAggregate
Group Key: k2.key
-> Aggregate
-> Seq Scan on keyval2_xxxxxxx k2
Filter: (key = 2)
-> Seq Scan on keyval1_xxxxxxx k1
Filter: (key = 2)
(18 rows)
(16 rows)
-- Simple join subquery pushdown
SELECT

View File

@ -152,7 +152,7 @@ SELECT
FROM
users_table RIGHT JOIN users_reference_table USING (user_id)
WHERE
users_table.value_2 IN
users_reference_table.value_2 IN
(SELECT
value_2
FROM

View File

@ -92,7 +92,7 @@ SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1
326 | 7
(5 rows)
CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR';
CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part table_name_for_view WHERE l_shipmode = 'AIR';
-- join between view and table
SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey);
count
@ -179,7 +179,7 @@ SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_linei
-- it passes planning, fails at execution stage
SET client_min_messages TO DEBUG1;
SELECT * FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey) ORDER BY o_orderkey DESC, o_custkey DESC, o_orderpriority DESC LIMIT 5;
DEBUG: generating subplan XXX_1 for subquery SELECT lineitem_hash_part.l_orderkey, lineitem_hash_part.l_partkey, lineitem_hash_part.l_suppkey, lineitem_hash_part.l_linenumber, lineitem_hash_part.l_quantity, lineitem_hash_part.l_extendedprice, lineitem_hash_part.l_discount, lineitem_hash_part.l_tax, lineitem_hash_part.l_returnflag, lineitem_hash_part.l_linestatus, lineitem_hash_part.l_shipdate, lineitem_hash_part.l_commitdate, lineitem_hash_part.l_receiptdate, lineitem_hash_part.l_shipinstruct, lineitem_hash_part.l_shipmode, lineitem_hash_part.l_comment FROM public.lineitem_hash_part WHERE (lineitem_hash_part.l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar)
DEBUG: generating subplan XXX_1 for subquery SELECT l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment FROM public.lineitem_hash_part table_name_for_view WHERE (l_shipmode OPERATOR(pg_catalog.=) 'AIR'::bpchar)
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT priority_orders.o_orderkey, priority_orders.o_custkey, priority_orders.o_orderstatus, priority_orders.o_totalprice, priority_orders.o_orderdate, priority_orders.o_orderpriority, priority_orders.o_clerk, priority_orders.o_shippriority, priority_orders.o_comment, air_shipped_lineitems.l_orderkey, air_shipped_lineitems.l_partkey, air_shipped_lineitems.l_suppkey, air_shipped_lineitems.l_linenumber, air_shipped_lineitems.l_quantity, air_shipped_lineitems.l_extendedprice, air_shipped_lineitems.l_discount, air_shipped_lineitems.l_tax, air_shipped_lineitems.l_returnflag, air_shipped_lineitems.l_linestatus, air_shipped_lineitems.l_shipdate, air_shipped_lineitems.l_commitdate, air_shipped_lineitems.l_receiptdate, air_shipped_lineitems.l_shipinstruct, air_shipped_lineitems.l_shipmode, air_shipped_lineitems.l_comment FROM ((SELECT orders_hash_part.o_orderkey, orders_hash_part.o_custkey, orders_hash_part.o_orderstatus, orders_hash_part.o_totalprice, orders_hash_part.o_orderdate, orders_hash_part.o_orderpriority, orders_hash_part.o_clerk, orders_hash_part.o_shippriority, orders_hash_part.o_comment FROM public.orders_hash_part WHERE (orders_hash_part.o_orderpriority OPERATOR(pg_catalog.<) '3-MEDIUM'::bpchar)) priority_orders JOIN (SELECT intermediate_result.l_orderkey, intermediate_result.l_partkey, intermediate_result.l_suppkey, intermediate_result.l_linenumber, intermediate_result.l_quantity, intermediate_result.l_extendedprice, intermediate_result.l_discount, intermediate_result.l_tax, intermediate_result.l_returnflag, intermediate_result.l_linestatus, intermediate_result.l_shipdate, intermediate_result.l_commitdate, intermediate_result.l_receiptdate, intermediate_result.l_shipinstruct, intermediate_result.l_shipmode, intermediate_result.l_comment FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, l_partkey integer, l_suppkey integer, l_linenumber 
integer, l_quantity numeric(15,2), l_extendedprice numeric(15,2), l_discount numeric(15,2), l_tax numeric(15,2), l_returnflag character(1), l_linestatus character(1), l_shipdate date, l_commitdate date, l_receiptdate date, l_shipinstruct character(25), l_shipmode character(10), l_comment character varying(44))) air_shipped_lineitems ON ((priority_orders.o_custkey OPERATOR(pg_catalog.=) air_shipped_lineitems.l_suppkey))) ORDER BY priority_orders.o_orderkey DESC, priority_orders.o_custkey DESC, priority_orders.o_orderpriority DESC LIMIT 5
DEBUG: push down of limit count: 5
o_orderkey | o_custkey | o_orderstatus | o_totalprice | o_orderdate | o_orderpriority | o_clerk | o_shippriority | o_comment | l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment

View File

@ -1079,19 +1079,19 @@ SELECT create_distributed_table('table1','tenant_id');
(1 row)
CREATE VIEW table1_view AS SELECT * from table1 where id < 100;
CREATE VIEW table1_view AS SELECT * from table1 table_name_for_view where id < 100;
-- all of the above queries are non-colocated subquery joins
-- because the views are replaced with subqueries
UPDATE table2 SET id=20 FROM table1_view WHERE table1_view.id=table2.id;
DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100)
DEBUG: generating subplan XXX_1 for subquery SELECT id, tenant_id FROM non_colocated_subquery.table1 table_name_for_view WHERE (id OPERATOR(pg_catalog.<) 100)
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2 SET id = 20 FROM (SELECT intermediate_result.id, intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2.id)
DEBUG: Creating router plan
UPDATE table2_p1 SET id=20 FROM table1_view WHERE table1_view.id=table2_p1.id;
DEBUG: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
DEBUG: Router planner cannot handle multi-shard select queries
DEBUG: generating subplan XXX_1 for subquery SELECT table1.id, table1.tenant_id FROM non_colocated_subquery.table1 WHERE (table1.id OPERATOR(pg_catalog.<) 100)
DEBUG: generating subplan XXX_1 for subquery SELECT id, tenant_id FROM non_colocated_subquery.table1 table_name_for_view WHERE (id OPERATOR(pg_catalog.<) 100)
DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE non_colocated_subquery.table2_p1 SET id = 20 FROM (SELECT intermediate_result.id, intermediate_result.tenant_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, tenant_id integer)) table1_view WHERE (table1_view.id OPERATOR(pg_catalog.=) table2_p1.id)
DEBUG: Creating router plan
RESET client_min_messages;

View File

@ -370,11 +370,13 @@ SELECT DISTINCT y FROM test;
(1 row)
-- non deterministic collations
SET client_min_messages TO WARNING;
CREATE COLLATION test_pg12.case_insensitive (
provider = icu,
locale = '@colStrength=secondary',
deterministic = false
);
RESET client_min_messages;
CREATE TABLE col_test (
id int,
val text collate case_insensitive

View File

@ -18,21 +18,21 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST true) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980000
NOTICE: issuing VACUUM (FULL) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST) pg14.t1_980001
NOTICE: issuing VACUUM (FULL) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, PROCESS_TOAST false) t1;
ERROR: PROCESS_TOAST required with VACUUM FULL
VACUUM (PROCESS_TOAST false) t1;
NOTICE: issuing VACUUM pg14.t1_980000
NOTICE: issuing VACUUM (PROCESS_TOAST FALSE) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980001
NOTICE: issuing VACUUM (PROCESS_TOAST FALSE) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP AUTO) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000
@ -62,14 +62,14 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP "AUTOX") t1;
ERROR: index_cleanup requires a Boolean value
VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1;
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
CREATE TABLE local_vacuum_table(name text);

View File

@ -0,0 +1,70 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
\endif
CREATE SCHEMA pg16;
SET search_path TO pg16;
SET citus.next_shard_id TO 950000;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
-- test the new vacuum and analyze options
-- Relevant PG commits:
-- https://github.com/postgres/postgres/commit/1cbbee03385763b066ae3961fc61f2cd01a0d0d7
-- https://github.com/postgres/postgres/commit/4211fbd8413b26e0abedbe4338aa7cda2cd469b4
-- https://github.com/postgres/postgres/commit/a46a7011b27188af526047a111969f257aaf4db8
CREATE TABLE t1 (a int);
SELECT create_distributed_table('t1','a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET citus.log_remote_commands TO ON;
VACUUM (PROCESS_MAIN FALSE) t1;
NOTICE: issuing VACUUM (PROCESS_MAIN FALSE) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (PROCESS_MAIN FALSE, PROCESS_TOAST FALSE) t1;
NOTICE: issuing VACUUM (PROCESS_TOAST FALSE,PROCESS_MAIN FALSE) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (PROCESS_MAIN TRUE) t1;
NOTICE: issuing VACUUM pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (PROCESS_MAIN FALSE, FULL) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_MAIN FALSE) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (SKIP_DATABASE_STATS) t1;
NOTICE: issuing VACUUM (SKIP_DATABASE_STATS) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (ONLY_DATABASE_STATS) t1;
ERROR: ONLY_DATABASE_STATS cannot be specified with a list of tables
VACUUM (BUFFER_USAGE_LIMIT '512 kB') t1;
NOTICE: issuing VACUUM (BUFFER_USAGE_LIMIT 512) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (BUFFER_USAGE_LIMIT 0) t1;
NOTICE: issuing VACUUM (BUFFER_USAGE_LIMIT 0) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (BUFFER_USAGE_LIMIT 16777220) t1;
ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB
VACUUM (BUFFER_USAGE_LIMIT -1) t1;
ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB
VACUUM (BUFFER_USAGE_LIMIT 'test') t1;
ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB
ANALYZE (BUFFER_USAGE_LIMIT '512 kB') t1;
NOTICE: issuing ANALYZE (BUFFER_USAGE_LIMIT 512) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
ANALYZE (BUFFER_USAGE_LIMIT 0) t1;
NOTICE: issuing ANALYZE (BUFFER_USAGE_LIMIT 0) pg16.t1_950000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
SET citus.log_remote_commands TO OFF;
-- only verifying it works and not printing log
-- remote commands because it can be flaky
VACUUM (ONLY_DATABASE_STATS);
\set VERBOSITY terse
SET client_min_messages TO ERROR;
DROP SCHEMA pg16 CASCADE;

View File

@ -0,0 +1,9 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q

View File

@ -1187,17 +1187,16 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c
-- same test using a view, can be recursively planned
CREATE VIEW my_view_1 AS
SELECT * FROM dist_1 t2 WHERE EXISTS (
SELECT * FROM dist_1 table_name_for_view WHERE EXISTS (
SELECT * FROM dist_1 t4
WHERE t4.a = t2.a
);
WHERE t4.a = table_name_for_view.a);
SELECT COUNT(*) FROM ref_1 t1
LEFT JOIN
my_view_1 t3
USING (a);
DEBUG: recursively planning right side of the left join since the outer side is a recurring rel
DEBUG: recursively planning the distributed subquery since it is part of a distributed join node that is outer joined with a recurring rel
DEBUG: generating subplan XXX_1 for subquery SELECT t2.a, t2.b FROM recurring_outer_join.dist_1 t2 WHERE (EXISTS (SELECT t4.a, t4.b FROM recurring_outer_join.dist_1 t4 WHERE (t4.a OPERATOR(pg_catalog.=) t2.a)))
DEBUG: generating subplan XXX_1 for subquery SELECT a, b FROM recurring_outer_join.dist_1 table_name_for_view WHERE (EXISTS (SELECT t4.a, t4.b FROM recurring_outer_join.dist_1 t4 WHERE (t4.a OPERATOR(pg_catalog.=) table_name_for_view.a)))
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (recurring_outer_join.ref_1 t1 LEFT JOIN (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) t3 USING (a))
count
---------------------------------------------------------------------

View File

@ -396,10 +396,10 @@ DROP VIEW numbers_v, local_table_v;
-- Joins between reference tables and materialized views are allowed to
-- be planned to be executed locally.
--
CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10;
NOTICE: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 numbers WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers table_name_for_view WHERE a BETWEEN 1 AND 10;
NOTICE: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 table_name_for_view WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
REFRESH MATERIALIZED VIEW numbers_v;
NOTICE: executing the command locally: SELECT numbers.a FROM replicate_ref_to_coordinator.numbers_8000001 numbers WHERE ((numbers.a OPERATOR(pg_catalog.>=) 1) AND (numbers.a OPERATOR(pg_catalog.<=) 10))
NOTICE: executing the command locally: SELECT a FROM replicate_ref_to_coordinator.numbers_8000001 table_name_for_view WHERE ((a OPERATOR(pg_catalog.>=) 1) AND (a OPERATOR(pg_catalog.<=) 10))
SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a ORDER BY 1;
NOTICE: executing the command locally: SELECT squares.a, squares.b, numbers_v.a FROM (replicate_ref_to_coordinator.squares_8000000 squares JOIN replicate_ref_to_coordinator.numbers_v ON ((squares.a OPERATOR(pg_catalog.=) numbers_v.a))) ORDER BY squares.a
a | b | a

View File

@ -121,7 +121,7 @@ FROM pg_dist_partition WHERE logicalrelid = 'null_dist_key_table'::regclass;
SELECT column_name_to_column('null_dist_key_table', 'a');
column_name_to_column
---------------------------------------------------------------------
{VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
{VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
(1 row)
SELECT master_update_shard_statistics(shardid)

View File

@ -158,12 +158,12 @@ SELECT * FROM test_matview;
SELECT * FROM pg_dist_partition WHERE logicalrelid::text LIKE 'events%' ORDER BY logicalrelid::text;
logicalrelid | partmethod | partkey | colocationid | repmodel | autoconverted
---------------------------------------------------------------------
events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
events | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
events_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
events_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980000 | s | f
events_replicated | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
events_replicated_2021_feb | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
events_replicated_2021_jan | h | {VAR :varno 1 :varattno 1 :vartype 1184 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1} | 980001 | c | f
(6 rows)
SELECT count(*) > 0 FROM pg_dist_node;

View File

@ -126,43 +126,6 @@ DEBUG: push down of limit count: 3
4
(3 rows)
-- subquery in FROM -> FROM -> WHERE -> WHERE should be replaced if
-- it contains onle local tables
-- Later the upper level query is also recursively planned due to LIMIT
SELECT user_id, array_length(events_table, 1)
FROM (
SELECT user_id, array_agg(event ORDER BY time) AS events_table
FROM (
SELECT
u.user_id, e.event_type::text AS event, e.time
FROM
users_table AS u,
events_table AS e
WHERE u.user_id = e.user_id AND
u.user_id IN
(
SELECT
user_id
FROM
users_table
WHERE value_2 >= 5
AND EXISTS (SELECT user_id FROM events_table_local WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1)
AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id)
LIMIT 5
)
) t
GROUP BY user_id
) q
ORDER BY 2 DESC, 1;
DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM subquery_local_tables.events_table_local WHERE ((event_type OPERATOR(pg_catalog.>) 1) AND (event_type OPERATOR(pg_catalog.<=) 3) AND (value_3 OPERATOR(pg_catalog.>) (1)::double precision))
DEBUG: push down of limit count: 5
DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) AND (NOT (EXISTS (SELECT events_table.user_id FROM public.events_table WHERE ((events_table.event_type OPERATOR(pg_catalog.>) 3) AND (events_table.event_type OPERATOR(pg_catalog.<=) 4) AND (events_table.value_3 OPERATOR(pg_catalog.>) (1)::double precision) AND (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id)))))) LIMIT 5
DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, array_length(events_table, 1) AS array_length FROM (SELECT t.user_id, array_agg(t.event ORDER BY t."time") AS events_table FROM (SELECT u.user_id, (e.event_type)::text AS event, e."time" FROM public.users_table u, public.events_table e WHERE ((u.user_id OPERATOR(pg_catalog.=) e.user_id) AND (u.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) t GROUP BY t.user_id) q ORDER BY (array_length(events_table, 1)) DESC, user_id
user_id | array_length
---------------------------------------------------------------------
5 | 364
(1 row)
-- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to local tables
SELECT
user_id

View File

@ -304,18 +304,18 @@ SELECT create_distributed_table('view_table', 'a');
INSERT INTO view_table VALUES (1, 2, 3), (2, 4, 6), (3, 6, 9);
CREATE SCHEMA another_schema;
CREATE VIEW undis_view1 AS SELECT a, b FROM view_table;
CREATE VIEW undis_view2 AS SELECT a, c FROM view_table;
CREATE VIEW undis_view1 AS SELECT a, b FROM view_table table_name_for_view;
CREATE VIEW undis_view2 AS SELECT a, c FROM view_table table_name_for_view;
CREATE VIEW another_schema.undis_view3 AS SELECT b, c FROM undis_view1 JOIN undis_view2 ON undis_view1.a = undis_view2.a;
SELECT schemaname, viewname, viewowner, definition FROM pg_views WHERE viewname LIKE 'undis\_view%' ORDER BY viewname;
schemaname | viewname | viewowner | definition
---------------------------------------------------------------------
undistribute_table | undis_view1 | postgres | SELECT view_table.a, +
| | | view_table.b +
| | | FROM view_table;
undistribute_table | undis_view2 | postgres | SELECT view_table.a, +
| | | view_table.c +
| | | FROM view_table;
undistribute_table | undis_view1 | postgres | SELECT a, +
| | | b +
| | | FROM view_table table_name_for_view;
undistribute_table | undis_view2 | postgres | SELECT a, +
| | | c +
| | | FROM view_table table_name_for_view;
another_schema | undis_view3 | postgres | SELECT undis_view1.b, +
| | | undis_view2.c +
| | | FROM (undis_view1 +
@ -348,12 +348,12 @@ NOTICE: renaming the new table to undistribute_table.view_table
SELECT schemaname, viewname, viewowner, definition FROM pg_views WHERE viewname LIKE 'undis\_view%' ORDER BY viewname;
schemaname | viewname | viewowner | definition
---------------------------------------------------------------------
undistribute_table | undis_view1 | postgres | SELECT view_table.a, +
| | | view_table.b +
| | | FROM view_table;
undistribute_table | undis_view2 | postgres | SELECT view_table.a, +
| | | view_table.c +
| | | FROM view_table;
undistribute_table | undis_view1 | postgres | SELECT a, +
| | | b +
| | | FROM view_table table_name_for_view;
undistribute_table | undis_view2 | postgres | SELECT a, +
| | | c +
| | | FROM view_table table_name_for_view;
another_schema | undis_view3 | postgres | SELECT undis_view1.b, +
| | | undis_view2.c +
| | | FROM (undis_view1 +
@ -400,22 +400,6 @@ NOTICE: renaming the new table to undistribute_table.dist_type_table
(1 row)
-- test CREATE RULE with ON SELECT
CREATE TABLE rule_table_1 (a INT);
CREATE TABLE rule_table_2 (a INT);
SELECT create_distributed_table('rule_table_2', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
-- the CREATE RULE turns rule_table_1 into a view
ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
SELECT undistribute_table('rule_table_2');
ERROR: cannot alter table because an extension depends on it
-- test CREATE RULE without ON SELECT
CREATE TABLE rule_table_3 (a INT);
CREATE TABLE rule_table_4 (a INT);
@ -444,9 +428,6 @@ NOTICE: renaming the new table to undistribute_table.rule_table_4
ALTER EXTENSION plpgsql DROP VIEW extension_view;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
ALTER EXTENSION plpgsql DROP TABLE rule_table_3;
NOTICE: Citus does not propagate adding/dropping member objects
HINT: You can add/drop the member objects on the workers as well.
@ -456,11 +437,9 @@ DETAIL: drop cascades to view undis_view1
drop cascades to view undis_view2
drop cascades to view another_schema.undis_view3
DROP SCHEMA undistribute_table, another_schema CASCADE;
NOTICE: drop cascades to 7 other objects
NOTICE: drop cascades to 5 other objects
DETAIL: drop cascades to table extension_table
drop cascades to view extension_view
drop cascades to table dist_type_table
drop cascades to table rule_table_2
drop cascades to view rule_table_1
drop cascades to table rule_table_3
drop cascades to table rule_table_4

View File

@ -400,3 +400,12 @@ SELECT * FROM t_range ORDER BY id;
(9 rows)
ROLLBACK;
-- There is a difference in partkey Var representation between PG16 and older versions
-- Sanity check here that we can properly do column_to_column_name
SELECT column_to_column_name(logicalrelid, partkey)
FROM pg_dist_partition WHERE partkey IS NOT NULL ORDER BY 1 LIMIT 1;
column_to_column_name
---------------------------------------------------------------------
a
(1 row)

View File

@ -65,3 +65,12 @@ UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid
UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2;
\copy t_range FROM STDIN with (DELIMITER ',')
\copy t_range FROM STDIN with (DELIMITER ',')
-- There is a difference in partkey Var representation between PG16 and older versions
-- Sanity check here that we can properly do column_to_column_name
SELECT column_to_column_name(logicalrelid, partkey)
FROM pg_dist_partition WHERE partkey IS NOT NULL ORDER BY 1 LIMIT 1;
column_to_column_name
---------------------------------------------------------------------
a
(1 row)

View File

@ -316,13 +316,13 @@ UNION ALL
employees e
INNER JOIN reporting_line rl ON e.manager_id = rl.employee_id;
-- Aliases are supported
CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6;
CREATE VIEW aliased_opt_prop_view(alias_1, alias_2) AS SELECT * FROM view_table_6 table_name_for_view;
-- View options are supported
CREATE VIEW opt_prop_view
WITH(check_option=CASCADED, security_barrier=true)
AS SELECT * FROM view_table_6;
AS SELECT * FROM view_table_6 table_name_for_view;
CREATE VIEW sep_opt_prop_view
AS SELECT * FROM view_table_6
AS SELECT * FROM view_table_6 table_name_for_view
WITH LOCAL CHECK OPTION;
SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as obj_identifier from pg_catalog.pg_dist_object) as obj_identifiers where obj_identifier::text like '%opt_prop_view%' ORDER BY 1;
obj_identifier
@ -337,25 +337,25 @@ SELECT * FROM (SELECT pg_identify_object_as_address(classid, objid, objsubid) as
SELECT definition FROM pg_views WHERE viewname = 'aliased_opt_prop_view';
definition
---------------------------------------------------------------------
SELECT view_table_6.id AS alias_1, +
view_table_6.val_1 AS alias_2 +
FROM view_prop_schema.view_table_6;
SELECT id AS alias_1, +
val_1 AS alias_2 +
FROM view_prop_schema.view_table_6 table_name_for_view;
(1 row)
SELECT definition FROM pg_views WHERE viewname = 'opt_prop_view';
definition
---------------------------------------------------------------------
SELECT view_table_6.id, +
view_table_6.val_1 +
FROM view_prop_schema.view_table_6;
SELECT id, +
val_1 +
FROM view_prop_schema.view_table_6 table_name_for_view;
(1 row)
SELECT definition FROM pg_views WHERE viewname = 'sep_opt_prop_view';
definition
---------------------------------------------------------------------
SELECT view_table_6.id, +
view_table_6.val_1 +
FROM view_prop_schema.view_table_6;
SELECT id, +
val_1 +
FROM view_prop_schema.view_table_6 table_name_for_view;
(1 row)
SELECT relname, reloptions
@ -444,7 +444,7 @@ SELECT create_distributed_table('alter_view_table','id');
(1 row)
CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table;
CREATE VIEW alter_view_1 AS SELECT * FROM alter_view_table table_name_for_view;
-- Set/drop default value is not supported by Citus
ALTER VIEW alter_view_1 ALTER COLUMN val1 SET DEFAULT random()::text;
ERROR: Citus doesn't support setting or resetting default values for a column of view
@ -467,9 +467,9 @@ ALTER TABLE alter_view_1 SET (check_option=cascaded, security_barrier = true);
SELECT definition FROM pg_views WHERE viewname = 'alter_view_1';
definition
---------------------------------------------------------------------
SELECT alter_view_table.id,+
alter_view_table.val1 +
FROM alter_view_table;
SELECT id, +
val1 +
FROM alter_view_table table_name_for_view;
(1 row)
SELECT relname, reloptions
@ -484,9 +484,9 @@ WHERE oid = 'view_prop_schema.alter_view_1'::regclass::oid;
SELECT definition FROM pg_views WHERE viewname = 'alter_view_1';
definition
---------------------------------------------------------------------
SELECT alter_view_table.id, +
alter_view_table.val1 +
FROM view_prop_schema.alter_view_table;
SELECT id, +
val1 +
FROM view_prop_schema.alter_view_table table_name_for_view;
(1 row)
SELECT relname, reloptions

View File

@ -65,6 +65,7 @@ test: pg13 pg12
test: pg14
test: pg15
test: pg15_jsonpath detect_conn_close
test: pg16
test: drop_column_partitioned_table
test: tableam

View File

@ -473,7 +473,7 @@ select run_command_on_workers($$SELECT count(*)=0 from citus_local_tables_mx.v10
select run_command_on_workers($$SELECT count(*)=0 from citus_local_tables_mx.v102$$);
CREATE TABLE loc_tb_2 (a int);
CREATE VIEW v104 AS SELECT * from loc_tb_2;
CREATE VIEW v104 AS SELECT * from loc_tb_2 table_name_for_view;
SET client_min_messages TO DEBUG1;
-- verify the CREATE command for the view is generated correctly

View File

@ -1,6 +1,10 @@
--
-- Test chunk filtering in columnar using min/max values in stripe skip lists.
--
-- It has an alternative test output file
-- because PG16 changed the order of some Filters in EXPLAIN
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/2489d76c4906f4461a364ca8ad7e0751ead8aa0d
--

View File

@ -77,10 +77,10 @@ SELECT CASE WHEN 1.0 * TopMemoryContext / :top_post BETWEEN 0.98 AND 1.03 THEN 1
FROM columnar_test_helpers.columnar_store_memory_stats();
-- before this change, max mem usage while executing inserts was 28MB and
-- with this change it's less than 8MB.
-- with this change it's less than 9MB.
SELECT
(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
(SELECT max(memusage) < 8 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='large batch') AS large_batch_ok,
(SELECT max(memusage) < 9 * 1024 * 1024 FROM t WHERE tag='first batch') AS first_batch_ok;
\x

View File

@ -193,7 +193,7 @@ WHERE w2.a = 123;
EXPLAIN (COSTS OFF) SELECT sub_1.b, sub_2.a, sub_3.avg
FROM
(SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b HAVING count(DISTINCT a) > 0 ORDER BY 1 DESC LIMIT 5) AS sub_1,
(SELECT b FROM full_correlated WHERE (a > 2) GROUP BY b ORDER BY 1 DESC LIMIT 5) AS sub_1,
(SELECT a FROM full_correlated WHERE (a > 10) GROUP BY a HAVING count(DISTINCT a) >= 1 ORDER BY 1 DESC LIMIT 3) AS sub_2,
(SELECT avg(a) AS AVG FROM full_correlated WHERE (a > 2) GROUP BY a HAVING sum(a) > 10 ORDER BY (sum(d) - avg(a) - COALESCE(array_upper(ARRAY[max(a)],1) * 5, 0)) DESC LIMIT 3) AS sub_3
WHERE sub_2.a < sub_1.b::integer

View File

@ -117,7 +117,7 @@ GRANT non_dist_role_4 TO dist_role_4;
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
\c - - - :worker_1_port

View File

@ -47,9 +47,12 @@ RESET client_min_messages;
SELECT pg_typeof(:maintenance_daemon_gpid);
\set VERBOSITY terse
SELECT pg_cancel_backend(:maintenance_daemon_gpid);
SELECT pg_terminate_backend(:maintenance_daemon_gpid);
\set VERBOSITY default
-- we can cancel our own backend
SELECT pg_cancel_backend(citus_backend_gpid());

View File

@ -611,7 +611,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
GROUP BY c1, c2, c3, c4, c5, c6
GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,
@ -625,7 +625,7 @@ SELECT c1, c2, c3, c4, -1::float AS c5,
sum(cardinality),
sum(sum)
FROM source_table
GROUP BY c1, c2, c3, c4, c5, c6
GROUP BY c1, c2, c3, c4, c6
ON CONFLICT(c1, c2, c3, c4, c5, c6)
DO UPDATE SET
cardinality = enriched.cardinality + excluded.cardinality,

View File

@ -78,14 +78,13 @@ SELECT count(*) FROM distributed JOIN unlogged_local USING (id);
CREATE MATERIALIZED VIEW mat_view AS SELECT * FROM local;
SELECT count(*) FROM distributed JOIN mat_view USING (id);
CREATE VIEW local_regular_view AS SELECT * FROM local;
CREATE VIEW local_regular_view AS SELECT * FROM local table_name_for_view;
CREATE VIEW dist_regular_view AS SELECT * FROM distributed;
SELECT count(*) FROM distributed JOIN local_regular_view USING (id);
SELECT count(*) FROM local JOIN dist_regular_view USING (id);
SELECT count(*) FROM dist_regular_view JOIN local_regular_view USING (id);
-- join alias/table alias
SELECT COUNT(*) FROM (distributed JOIN local USING (id)) AS t(a,b,c,d) ORDER BY d,c,a,b LIMIT 3;
SELECT COUNT(*) FROM (distributed d1(x,y,y1) JOIN local l1(x,t) USING (x)) AS t(a,b,c,d) ORDER BY d,c,a,b LIMIT 3;

View File

@ -362,9 +362,6 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
where (
select true as bool from pg_catalog.pg_am limit 1
)
) as subq_1
) as subq_2;
@ -379,9 +376,6 @@ select typdefault from (
select a from tbl
where typdefault > 'a'
limit 1) as subq_0
where (
select true as bool from pg_catalog.pg_am limit 1
)
) as subq_1
) as subq_2;

View File

@ -798,8 +798,19 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SET application_name to 'citus_internal gpid=10000000001';
-- with an ugly trick, update the vartype of table from int to bigint
-- so that making two tables colocated fails
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}'
-- include varnullingrels for PG16
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\else
UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}'
WHERE logicalrelid = 'test_2'::regclass;
\endif
SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
ROLLBACK;

View File

@ -1,7 +1,13 @@
--
-- COMPLEX_COUNT_DISTINCT
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
SET citus.next_shard_id TO 240000;
SET citus.shard_count TO 8;

View File

@ -1,6 +1,13 @@
--
-- MULTI_EXPLAIN
--
-- This test file has an alternative output because of the following in PG16:
-- https://github.com/postgres/postgres/commit/1349d2790bf48a4de072931c722f39337e72055e
-- https://github.com/postgres/postgres/commit/f4c7c410ee4a7baa06f51ebb8d5333c169691dd3
-- The alternative output can be deleted when we drop support for PG15
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16;
SET citus.next_shard_id TO 570000;

View File

@ -458,7 +458,7 @@ DELETE FROM pg_dist_shard WHERE shardid = 1;
CREATE TABLE e_transactions(order_id varchar(255) NULL, transaction_id int) PARTITION BY LIST(transaction_id);
CREATE TABLE orders_2020_07_01
PARTITION OF e_transactions FOR VALUES IN (1,2,3);
INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
INSERT INTO pg_dist_partition VALUES ('e_transactions'::regclass,'h', '{VAR :varno 1 :varattno 1 :vartype 1043 :vartypmod 259 :varcollid 100 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}', 7, 's');
SELECT
(metadata->>'partitioned_citus_table_exists_pre_11')::boolean as partitioned_citus_table_exists_pre_11,

View File

@ -336,12 +336,14 @@ FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
WHERE o_orderkey IN (1, 2)
OR l_orderkey IN (2, 3);
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS OFF)
SELECT count(*)
FROM orders_hash_partitioned
FULL OUTER JOIN lineitem_hash_partitioned ON (o_orderkey = l_orderkey)
WHERE o_orderkey IN (1, 2)
AND l_orderkey IN (2, 3);
$Q$);
SET citus.task_executor_type TO DEFAULT;

View File

@ -43,7 +43,7 @@ EXPLAIN (COSTS FALSE)
SELECT sum(l_extendedprice * l_discount) as revenue
FROM lineitem_hash, orders_hash
WHERE o_orderkey = l_orderkey
GROUP BY l_orderkey, o_orderkey, l_shipmode HAVING sum(l_quantity) > 24
GROUP BY l_orderkey, l_shipmode HAVING sum(l_quantity) > 24
ORDER BY 1 DESC LIMIT 3;
EXPLAIN (COSTS FALSE)

View File

@ -226,14 +226,32 @@ RESET citus.enable_metadata_sync;
-- the shards and indexes do not show up
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- PG16 added one more backend type B_STANDALONE_BACKEND
-- and also alphabetized the backend types, hence the orders changed
-- Relevant PG commit:
-- https://github.com/postgres/postgres/commit/0c679464a837079acc75ff1d45eaa83f79e05690
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
SELECT 4 AS client_backend \gset
SELECT 5 AS bgworker \gset
SELECT 12 AS walsender \gset
\else
SELECT 3 AS client_backend \gset
SELECT 4 AS bgworker \gset
SELECT 9 AS walsender \gset
\endif
-- say, we set it to bgworker
-- the shards and indexes do not show up
SELECT set_backend_type(4);
SELECT set_backend_type(:bgworker);
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- or, we set it to walsender
-- the shards and indexes do not show up
SELECT set_backend_type(9);
SELECT set_backend_type(:walsender);
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;
-- unless the application name starts with citus_shard
@ -242,7 +260,7 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name
RESET application_name;
-- but, client backends to see the shards
SELECT set_backend_type(3);
SELECT set_backend_type(:client_backend);
SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname;

View File

@ -177,7 +177,7 @@ ORDER BY 2, AVG(ut.value_1), 1 DESC
LIMIT 2;
EXPLAIN (COSTS OFF)
SELECT ut.user_id, count(DISTINCT ut.value_2)
SELECT ut.user_id, avg(ut.value_2)
FROM users_table ut, events_table et
WHERE ut.user_id = et.user_id and et.value_2 < 5
GROUP BY ut.user_id

View File

@ -303,7 +303,7 @@ SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
GROUP BY l_orderkey
GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
-- check the plan if the hash aggreate is disabled. We expect to see sort + unique
@ -312,7 +312,7 @@ SET enable_hashagg TO off;
EXPLAIN (COSTS FALSE)
SELECT DISTINCT count(DISTINCT l_partkey), count(DISTINCT l_shipmode)
FROM lineitem_hash_part
GROUP BY l_orderkey
GROUP BY l_orderkey, l_partkey, l_shipmode
ORDER BY 1,2;
SET enable_hashagg TO on;

View File

@ -676,7 +676,7 @@ EXPLAIN (COSTS OFF)
SELECT count(*) FROM keyval1 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
EXPLAIN (COSTS OFF)
SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 GROUP BY key HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 GROUP BY key ORDER BY 1 DESC LIMIT 1);
SELECT count(*) FROM keyval1 k1 WHERE k1.key = 2 HAVING sum(value) > (SELECT sum(value) FROM keyval2 k2 WHERE k2.key = 2 ORDER BY 1 DESC LIMIT 1);
-- Simple join subquery pushdown
SELECT

View File

@ -132,7 +132,7 @@ SELECT
FROM
users_table RIGHT JOIN users_reference_table USING (user_id)
WHERE
users_table.value_2 IN
users_reference_table.value_2 IN
(SELECT
value_2
FROM

View File

@ -37,7 +37,7 @@ CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN pri
SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5;
CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR';
CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part table_name_for_view WHERE l_shipmode = 'AIR';
-- join between view and table
SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey);

View File

@ -792,7 +792,7 @@ SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('table2','tenant_id');
SELECT create_distributed_table('table1','tenant_id');
CREATE VIEW table1_view AS SELECT * from table1 where id < 100;
CREATE VIEW table1_view AS SELECT * from table1 table_name_for_view where id < 100;
-- all of the above queries are non-colocated subquery joins
-- because the views are replaced with subqueries

View File

@ -242,11 +242,13 @@ COMMIT;
SELECT DISTINCT y FROM test;
-- non deterministic collations
SET client_min_messages TO WARNING;
CREATE COLLATION test_pg12.case_insensitive (
provider = icu,
locale = '@colStrength=secondary',
deterministic = false
);
RESET client_min_messages;
CREATE TABLE col_test (
id int,

View File

@ -0,0 +1,50 @@
--
-- PG16
--
SHOW server_version \gset
SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16
\gset
\if :server_version_ge_16
\else
\q
\endif
CREATE SCHEMA pg16;
SET search_path TO pg16;
SET citus.next_shard_id TO 950000;
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
-- test the new vacuum and analyze options
-- Relevant PG commits:
-- https://github.com/postgres/postgres/commit/1cbbee03385763b066ae3961fc61f2cd01a0d0d7
-- https://github.com/postgres/postgres/commit/4211fbd8413b26e0abedbe4338aa7cda2cd469b4
-- https://github.com/postgres/postgres/commit/a46a7011b27188af526047a111969f257aaf4db8
CREATE TABLE t1 (a int);
SELECT create_distributed_table('t1','a');
SET citus.log_remote_commands TO ON;
VACUUM (PROCESS_MAIN FALSE) t1;
VACUUM (PROCESS_MAIN FALSE, PROCESS_TOAST FALSE) t1;
VACUUM (PROCESS_MAIN TRUE) t1;
VACUUM (PROCESS_MAIN FALSE, FULL) t1;
VACUUM (SKIP_DATABASE_STATS) t1;
VACUUM (ONLY_DATABASE_STATS) t1;
VACUUM (BUFFER_USAGE_LIMIT '512 kB') t1;
VACUUM (BUFFER_USAGE_LIMIT 0) t1;
VACUUM (BUFFER_USAGE_LIMIT 16777220) t1;
VACUUM (BUFFER_USAGE_LIMIT -1) t1;
VACUUM (BUFFER_USAGE_LIMIT 'test') t1;
ANALYZE (BUFFER_USAGE_LIMIT '512 kB') t1;
ANALYZE (BUFFER_USAGE_LIMIT 0) t1;
SET citus.log_remote_commands TO OFF;
-- only verifying it works and not printing log
-- remote commands because it can be flaky
VACUUM (ONLY_DATABASE_STATS);
\set VERBOSITY terse
SET client_min_messages TO ERROR;
DROP SCHEMA pg16 CASCADE;

View File

@ -612,10 +612,9 @@ USING (a);
-- same test using a view, can be recursively planned
CREATE VIEW my_view_1 AS
SELECT * FROM dist_1 t2 WHERE EXISTS (
SELECT * FROM dist_1 table_name_for_view WHERE EXISTS (
SELECT * FROM dist_1 t4
WHERE t4.a = t2.a
);
WHERE t4.a = table_name_for_view.a);
SELECT COUNT(*) FROM ref_1 t1
LEFT JOIN

View File

@ -192,7 +192,7 @@ DROP VIEW numbers_v, local_table_v;
-- Joins between reference tables and materialized views are allowed to
-- be planned to be executed locally.
--
CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers WHERE a BETWEEN 1 AND 10;
CREATE MATERIALIZED VIEW numbers_v AS SELECT * FROM numbers table_name_for_view WHERE a BETWEEN 1 AND 10;
REFRESH MATERIALIZED VIEW numbers_v;
SELECT * FROM squares JOIN numbers_v ON squares.a = numbers_v.a ORDER BY 1;

View File

@ -95,37 +95,6 @@ FROM
LIMIT 3;
-- subquery in FROM -> FROM -> WHERE -> WHERE should be replaced if
-- it contains only local tables
-- Later the upper level query is also recursively planned due to LIMIT
SELECT user_id, array_length(events_table, 1)
FROM (
SELECT user_id, array_agg(event ORDER BY time) AS events_table
FROM (
SELECT
u.user_id, e.event_type::text AS event, e.time
FROM
users_table AS u,
events_table AS e
WHERE u.user_id = e.user_id AND
u.user_id IN
(
SELECT
user_id
FROM
users_table
WHERE value_2 >= 5
AND EXISTS (SELECT user_id FROM events_table_local WHERE event_type > 1 AND event_type <= 3 AND value_3 > 1)
AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 3 AND event_type <= 4 AND value_3 > 1 AND user_id = users_table.user_id)
LIMIT 5
)
) t
GROUP BY user_id
) q
ORDER BY 2 DESC, 1;
-- subquery (i.e., subquery_2) in WHERE->FROM should be replaced due to local tables
SELECT
user_id

View File

@ -105,8 +105,8 @@ INSERT INTO view_table VALUES (1, 2, 3), (2, 4, 6), (3, 6, 9);
CREATE SCHEMA another_schema;
CREATE VIEW undis_view1 AS SELECT a, b FROM view_table;
CREATE VIEW undis_view2 AS SELECT a, c FROM view_table;
CREATE VIEW undis_view1 AS SELECT a, b FROM view_table table_name_for_view;
CREATE VIEW undis_view2 AS SELECT a, c FROM view_table table_name_for_view;
CREATE VIEW another_schema.undis_view3 AS SELECT b, c FROM undis_view1 JOIN undis_view2 ON undis_view1.a = undis_view2.a;
SELECT schemaname, viewname, viewowner, definition FROM pg_views WHERE viewname LIKE 'undis\_view%' ORDER BY viewname;
@ -131,18 +131,6 @@ SELECT create_distributed_table('dist_type_table', 'a');
SELECT undistribute_table('dist_type_table');
-- test CREATE RULE with ON SELECT
CREATE TABLE rule_table_1 (a INT);
CREATE TABLE rule_table_2 (a INT);
SELECT create_distributed_table('rule_table_2', 'a');
CREATE RULE "_RETURN" AS ON SELECT TO rule_table_1 DO INSTEAD SELECT * FROM rule_table_2;
-- the CREATE RULE turns rule_table_1 into a view
ALTER EXTENSION plpgsql ADD VIEW rule_table_1;
SELECT undistribute_table('rule_table_2');
-- test CREATE RULE without ON SELECT
CREATE TABLE rule_table_3 (a INT);
CREATE TABLE rule_table_4 (a INT);
@ -155,7 +143,6 @@ ALTER EXTENSION plpgsql ADD TABLE rule_table_3;
SELECT undistribute_table('rule_table_4');
ALTER EXTENSION plpgsql DROP VIEW extension_view;
ALTER EXTENSION plpgsql DROP VIEW rule_table_1;
ALTER EXTENSION plpgsql DROP TABLE rule_table_3;
DROP TABLE view_table CASCADE;

View File

@ -143,3 +143,8 @@ SELECT * FROM t_range ORDER BY id;
ROLLBACK;
-- There is a difference in partkey Var representation between PG16 and older versions
-- Sanity check here that we can properly do column_to_column_name
SELECT column_to_column_name(logicalrelid, partkey)
FROM pg_dist_partition WHERE partkey IS NOT NULL ORDER BY 1 LIMIT 1;

Some files were not shown because too many files have changed in this diff Show More