mirror of https://github.com/citusdata/citus.git
make sure to correctly decrement ExecutorLevel (#3311)
DESCRIPTION: Fix counter that keeps track of internal depth in executor

While reviewing #3302 I ran into the `ExecutorLevel` variable, which saved the original value in a local variable so it could be restored on successful exit. I haven't explored the full space of whether it is possible to get into an inconsistent state, but using `PG_TRY`/`PG_CATCH` seems generally more correct. Given that very bad things happen if this level is not reset, I kept the failsafe of setting the variable back to 0 in the `XactCallback`, but I added an assert so that a non-zero value there is treated as a developer bug.
branch release-9.1
parent 9f8e34f874
commit b4facca9c3
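For context, the error-safe nesting-counter pattern this commit switches to looks roughly like the sketch below. It is a minimal, hypothetical example (the names NestingLevel, RunWithLevelTracking, and body are illustrative, not Citus code); it only relies on PostgreSQL's PG_TRY/PG_CATCH/PG_RE_THROW/PG_END_TRY macros, so that an ereport(ERROR, ...) raised at any depth still unwinds the counter one level at a time instead of skipping the decrement.

#include "postgres.h"

/* illustrative counter, analogous in spirit to ExecutorLevel */
static int NestingLevel = 0;

static void
RunWithLevelTracking(void (*body) (void))
{
	PG_TRY();
	{
		NestingLevel++;

		body();             /* may raise an error via ereport(ERROR, ...) */

		NestingLevel--;     /* normal exit */
	}
	PG_CATCH();
	{
		NestingLevel--;     /* error exit: undo this level's increment */

		PG_RE_THROW();      /* let the error propagate to the next handler */
	}
	PG_END_TRY();
}

Because the decrement happens on both the normal and the error path, nested invocations unwind cleanly even when an error longjmps through several levels at once.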
@@ -122,64 +122,70 @@ CitusExecutorRun(QueryDesc *queryDesc,
                  ScanDirection direction, uint64 count, bool execute_once)
 {
     DestReceiver *dest = queryDesc->dest;
 
-    int originalLevel = ExecutorLevel;
-    ExecutorLevel++;
-
-    if (CitusHasBeenLoaded())
-    {
-        if (IsLocalReferenceTableJoinPlan(queryDesc->plannedstmt) &&
-            IsMultiStatementTransaction())
-        {
-            /*
-             * Currently we don't support this to avoid problems with tuple
-             * visibility, locking, etc. For example, change to the reference
-             * table can go through a MultiConnection, which won't be visible
-             * to the locally planned queries.
-             */
-            ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                            errmsg("cannot join local tables and reference tables in "
-                                   "a transaction block, udf block, or distributed "
-                                   "CTE subquery")));
-        }
-    }
-
-    /*
-     * Disable execution of ALTER TABLE constraint validation queries. These
-     * constraints will be validated in worker nodes, so running these queries
-     * from the coordinator would be redundant.
-     *
-     * For example, ALTER TABLE ... ATTACH PARTITION checks that the new
-     * partition doesn't violate constraints of the parent table, which
-     * might involve running some SELECT queries.
-     *
-     * Ideally we'd completely skip these checks in the coordinator, but we don't
-     * have any means to tell postgres to skip the checks. So the best we can do is
-     * to not execute the queries and return an empty result set, as if this table has
-     * no rows, so no constraints will be violated.
-     */
-    if (AlterTableConstraintCheck(queryDesc))
-    {
-        EState *estate = queryDesc->estate;
-
-        estate->es_processed = 0;
-#if PG_VERSION_NUM < 120000
-        estate->es_lastoid = InvalidOid;
-#endif
-
-        /* start and shutdown tuple receiver to simulate empty result */
-        dest->rStartup(queryDesc->dest, CMD_SELECT, queryDesc->tupDesc);
-        dest->rShutdown(dest);
-    }
-    else
-    {
-        standard_ExecutorRun(queryDesc, direction, count, execute_once);
-    }
-
-    /*
-     * Restore the original value. It is not sufficient to decrease the value
-     * because exceptions might cause us to go back a few levels at once.
-     */
-    ExecutorLevel = originalLevel;
+    PG_TRY();
+    {
+        ExecutorLevel++;
+
+        if (CitusHasBeenLoaded())
+        {
+            if (IsLocalReferenceTableJoinPlan(queryDesc->plannedstmt) &&
+                IsMultiStatementTransaction())
+            {
+                /*
+                 * Currently we don't support this to avoid problems with tuple
+                 * visibility, locking, etc. For example, change to the reference
+                 * table can go through a MultiConnection, which won't be visible
+                 * to the locally planned queries.
+                 */
+                ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                errmsg("cannot join local tables and reference tables in "
+                                       "a transaction block, udf block, or distributed "
+                                       "CTE subquery")));
+            }
+        }
+
+        /*
+         * Disable execution of ALTER TABLE constraint validation queries. These
+         * constraints will be validated in worker nodes, so running these queries
+         * from the coordinator would be redundant.
+         *
+         * For example, ALTER TABLE ... ATTACH PARTITION checks that the new
+         * partition doesn't violate constraints of the parent table, which
+         * might involve running some SELECT queries.
+         *
+         * Ideally we'd completely skip these checks in the coordinator, but we don't
+         * have any means to tell postgres to skip the checks. So the best we can do is
+         * to not execute the queries and return an empty result set, as if this table has
+         * no rows, so no constraints will be violated.
+         */
+        if (AlterTableConstraintCheck(queryDesc))
+        {
+            EState *estate = queryDesc->estate;
+
+            estate->es_processed = 0;
+#if PG_VERSION_NUM < 120000
+            estate->es_lastoid = InvalidOid;
+#endif
+
+            /* start and shutdown tuple receiver to simulate empty result */
+            dest->rStartup(queryDesc->dest, CMD_SELECT, queryDesc->tupDesc);
+            dest->rShutdown(dest);
+        }
+        else
+        {
+            standard_ExecutorRun(queryDesc, direction, count, execute_once);
+        }
+
+        ExecutorLevel--;
+    }
+    PG_CATCH();
+    {
+        ExecutorLevel--;
+
+        PG_RE_THROW();
+    }
+    PG_END_TRY();
 }
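To make the failure mode concrete: without a PG_CATCH branch, an error raised anywhere below the increment skips the decrement entirely. The following is a hypothetical illustration with made-up names (NestingLevel, RunWithoutCatch), not Citus code:

#include "postgres.h"

static int NestingLevel = 0;

/*
 * Broken variant for illustration: ereport(ERROR, ...) performs a
 * siglongjmp past the rest of this function, so the decrement below
 * never runs and the counter stays inflated.
 */
static void
RunWithoutCatch(void)
{
	NestingLevel++;

	if (NestingLevel > 1)
	{
		ereport(ERROR, (errmsg("nested execution not supported")));
	}

	NestingLevel--;     /* skipped whenever the ereport above fires */
}

This is why the removed comment notes that "exceptions might cause us to go back a few levels at once": the old code compensated by restoring a saved originalLevel on successful exit, whereas the PG_TRY/PG_CATCH version undoes each level's increment as the error propagates upward.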
@@ -302,6 +302,13 @@ CoordinatedTransactionCallback(XactEvent event, void *arg)
             dlist_init(&InProgressTransactions);
             activeSetStmts = NULL;
             CoordinatedTransactionUses2PC = false;
 
+            /*
+             * Getting here without ExecutorLevel 0 is a bug, however it is such a big
+             * problem that will persist between reuse of the backend we still assign 0 in
+             * production deploys, but during development and tests we want to crash.
+             */
+            Assert(ExecutorLevel == 0);
             ExecutorLevel = 0;
 
             /*
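The second hunk keeps the transaction callback as a last-resort reset. A minimal, hypothetical sketch of that registration pattern is shown below (ResetNestingLevelCallback and NestingLevel are illustrative names; Citus performs the reset inside its existing CoordinatedTransactionCallback rather than in a separate callback). The point of the pattern: Assert crashes immediately in assert-enabled development and test builds (--enable-cassert), but compiles to a no-op in production builds, so the unconditional reset remains the production failsafe.

#include "postgres.h"
#include "fmgr.h"
#include "access/xact.h"

PG_MODULE_MAGIC;

static int NestingLevel = 0;

/* reset the counter when a transaction ends, as a last-resort failsafe */
static void
ResetNestingLevelCallback(XactEvent event, void *arg)
{
	if (event == XACT_EVENT_COMMIT || event == XACT_EVENT_ABORT)
	{
		/* a non-zero level here is a developer bug; crash in assert builds */
		Assert(NestingLevel == 0);

		/* in production builds Assert is a no-op, so still reset explicitly */
		NestingLevel = 0;
	}
}

void
_PG_init(void)
{
	/* register once when the extension library is loaded */
	RegisterXactCallback(ResetNestingLevelCallback, NULL);
}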