mirror of https://github.com/citusdata/citus.git

Don't segfault on queries using GROUPING

GROUPING will always return 0 outside of GROUPING SETS, CUBE, or ROLLUP.
Since we don't support those, it makes sense to reject GROUPING in queries.

pull/3653/head
parent 0ad1956551
commit 917cb6ae93

@@ -225,13 +225,12 @@ HasNonPartitionColumnDistinctAgg(List *targetEntryList, Node *havingQual,
 		ListCell *varCell = NULL;
 		bool isPartitionColumn = false;

-		if (IsA(targetNode, Var))
+		if (!IsA(targetNode, Aggref))
 		{
 			continue;
 		}

-		Assert(IsA(targetNode, Aggref));
-		Aggref *targetAgg = (Aggref *) targetNode;
+		Aggref *targetAgg = castNode(Aggref, targetNode);
 		if (targetAgg->aggdistinct == NIL)
 		{
 			continue;

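A note on the castNode() change in the hunk above: PostgreSQL's castNode(Aggref, targetNode) folds the old Assert-plus-cast pair into a single expression, asserting the node tag in assert-enabled builds and compiling to a plain cast otherwise. A minimal sketch of what that amounts to (the helper name checked_aggref_cast is illustrative, not part of the patch):

/*
 * Sketch of the idiom the hunk above adopts.  In the patch itself the same
 * effect comes from castNode(Aggref, targetNode).
 */
#include "postgres.h"
#include "nodes/primnodes.h"

static Aggref *
checked_aggref_cast(Node *targetNode)
{
	Assert(IsA(targetNode, Aggref));	/* tag check, compiled out without asserts */
	return (Aggref *) targetNode;		/* the cast itself */
}
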
@@ -82,6 +82,7 @@ static bool IsReadIntermediateResultFunction(Node *node);
 static bool IsReadIntermediateResultArrayFunction(Node *node);
 static bool IsCitusExtraDataContainerFunc(Node *node);
 static bool IsFunctionWithOid(Node *node, Oid funcOid);
+static bool IsGroupingFunc(Node *node);
 static bool ExtractFromExpressionWalker(Node *node,
 										QualifierWalkerContext *walkerContext);
 static List * MultiTableNodeList(List *tableEntryList, List *rangeTableList);

@@ -884,6 +885,16 @@ IsFunctionWithOid(Node *node, Oid funcOid)
 }


+/*
+ * IsGroupingFunc returns whether node is a GroupingFunc.
+ */
+static bool
+IsGroupingFunc(Node *node)
+{
+	return IsA(node, GroupingFunc);
+}
+
+
 /*
  * FindIntermediateResultIdIfExists extracts the id of the intermediate result
  * if the given RTE contains a read_intermediate_results function, NULL otherwise

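For context on the new predicate: GROUPING() is parsed into PostgreSQL's GroupingFunc node, which has its own node tag and is not an Aggref, so aggregate-only code paths never see it coming. The declaration below is reproduced from PostgreSQL's nodes/primnodes.h around the era of this patch; exact fields and comments may differ slightly between server versions.

typedef struct GroupingFunc
{
	Expr		xpr;
	List	   *args;			/* arguments, not evaluated but kept for EXPLAIN etc. */
	List	   *refs;			/* ressortgrouprefs of arguments */
	List	   *cols;			/* actual column positions set by planner */
	Index		agglevelsup;	/* same as Aggref.agglevelsup */
	int			location;		/* token location */
} GroupingFunc;
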
@@ -978,6 +989,13 @@ DeferErrorIfQueryNotSupported(Query *queryTree)
 		errorHint = filterHint;
 	}

+	if (FindNodeCheck((Node *) queryTree, IsGroupingFunc))
+	{
+		preconditionsSatisfied = false;
+		errorMessage = "could not run distributed query with GROUPING";
+		errorHint = filterHint;
+	}
+
 	bool hasTablesample = HasTablesample(queryTree);
 	if (hasTablesample)
 	{

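The check added above walks the whole query tree with FindNodeCheck and the new IsGroupingFunc predicate. As a hedged sketch of how such a search can be built on PostgreSQL's walker infrastructure (FindMatchingNodeWalker and FindNodeContext are illustrative names; Citus's actual FindNodeCheck takes the predicate directly rather than through a context struct):

/*
 * Sketch of a FindNodeCheck-style search: return true if any node in the
 * given tree satisfies the predicate.
 */
#include "postgres.h"
#include "nodes/nodeFuncs.h"

typedef struct FindNodeContext
{
	bool		(*check) (Node *node);	/* predicate, e.g. IsGroupingFunc */
} FindNodeContext;

static bool
FindMatchingNodeWalker(Node *node, FindNodeContext *context)
{
	if (node == NULL)
	{
		return false;
	}

	if (context->check(node))
	{
		return true;			/* found a match, stop walking */
	}

	if (IsA(node, Query))
	{
		/* recurse into subqueries; flags 0 still visits subquery RTEs */
		return query_tree_walker((Query *) node, FindMatchingNodeWalker,
								 context, 0);
	}

	return expression_tree_walker(node, FindMatchingNodeWalker, context);
}

With a helper like this, DeferErrorIfQueryNotSupported can reject any query that contains a GroupingFunc anywhere in its tree, which is exactly what the hunk above does.
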
@@ -1017,7 +1017,7 @@ AddAnyValueAggregates(Node *node, void *context)
 			}
 		}
 	}
-	if (IsA(node, Aggref))
+	if (IsA(node, Aggref) || IsA(node, GroupingFunc))
 	{
 		return node;
 	}

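The last code hunk extends the any_value() rewrite so it also skips GROUPING() calls, just as it already skips aggregates; otherwise the mutator would rewrite the Vars inside GROUPING()'s argument list. A hedged sketch of the surrounding mutator pattern (simplified; only the IsA() test mirrors the patch, the rest is illustrative):

/*
 * Sketch of the mutator pattern the hunk above extends: Aggref and (after
 * this patch) GroupingFunc subtrees are returned unchanged so that the Vars
 * inside them are not wrapped in any_value().
 */
#include "postgres.h"
#include "nodes/nodeFuncs.h"

static Node *
AddAnyValueAggregatesSketch(Node *node, void *context)
{
	if (node == NULL)
	{
		return NULL;
	}

	/* do not rewrite anything inside an aggregate or a GROUPING() call */
	if (IsA(node, Aggref) || IsA(node, GroupingFunc))
	{
		return node;
	}

	/*
	 * Here the real function wraps ungrouped Vars in any_value(); the sketch
	 * simply keeps walking the expression tree.
	 */
	return expression_tree_mutator(node, AddAnyValueAggregatesSketch, context);
}
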
@@ -432,6 +432,21 @@ select key, count(distinct aggdata)
 from aggdata group by key order by 1, 2;
 ERROR: type "aggregate_support.aggdata" does not exist
 CONTEXT: while executing command on localhost:xxxxx
+-- GROUPING parses to GroupingFunc, distinct from Aggref
+-- These three queries represent edge cases implementation would have to consider
+-- For now we error out of all three
+select grouping(id)
+from aggdata group by id order by 1 limit 3;
+ERROR: could not run distributed query with GROUPING
+HINT: Consider using an equality filter on the distributed table's partition column.
+select key, grouping(val)
+from aggdata group by key, val order by 1, 2;
+ERROR: could not run distributed query with GROUPING
+HINT: Consider using an equality filter on the distributed table's partition column.
+select key, grouping(val), sum(distinct valf)
+from aggdata group by key, val order by 1, 2;
+ERROR: could not run distributed query with GROUPING
+HINT: Consider using an equality filter on the distributed table's partition column.
 -- Test https://github.com/citusdata/citus/issues/3328
 create table nulltable(id int);
 insert into nulltable values (0);

@@ -204,6 +204,19 @@ RESET citus.task_executor_type;
 select key, count(distinct aggdata)
 from aggdata group by key order by 1, 2;

+-- GROUPING parses to GroupingFunc, distinct from Aggref
+-- These three queries represent edge cases implementation would have to consider
+-- For now we error out of all three
+select grouping(id)
+from aggdata group by id order by 1 limit 3;
+
+select key, grouping(val)
+from aggdata group by key, val order by 1, 2;
+
+select key, grouping(val), sum(distinct valf)
+from aggdata group by key, val order by 1, 2;
+
+
 -- Test https://github.com/citusdata/citus/issues/3328
 create table nulltable(id int);
 insert into nulltable values (0);