mirror of https://github.com/citusdata/citus.git
Make router planner active at all times
We used to disable the router planner and executor when the task executor was set to task-tracker. This change enables router planning and execution at all times, regardless of the task execution mode. We introduce a hidden flag, citus.enable_router_execution, to enable or disable router execution; it defaults to true, and users can disable router planning by setting it to false.

pull/1027/head
parent 6f95875191
commit c3a60bff70
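For context, a minimal usage sketch of the new setting, drawn from the regression tests touched by this commit (the orders_hash_partitioned table and the DEBUG messages referenced below come from those tests):

-- Router planning is on by default; with client_min_messages at DEBUG2,
-- a single-shard SELECT reports "Creating router plan" and
-- "Plan is router executable".
SET client_min_messages TO DEBUG2;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
-- Opt out of router planning, then restore the default.
SET citus.enable_router_execution TO 'false';
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
SET citus.enable_router_execution TO DEFAULT;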
@@ -33,10 +33,10 @@
#include "distributed/citus_ruleutils.h"
#include "distributed/master_protocol.h"
#include "distributed/metadata_cache.h"
#include "distributed/multi_router_planner.h"
#include "distributed/multi_logical_optimizer.h"
#include "distributed/multi_logical_planner.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/multi_server_executor.h"
#include "distributed/pg_dist_partition.h"
#include "distributed/pg_dist_shard.h"
#include "distributed/shardinterval_utils.h"
@@ -244,6 +244,11 @@ MultiPlanRouterExecutable(MultiPlan *multiPlan)
    int dependedJobCount = list_length(job->dependedJobList);
    bool masterQueryHasAggregates = false;

    if (!EnableRouterExecution)
    {
        return false;
    }

    /* router executor cannot execute SELECT queries that hit more than one shard */
    if (taskCount != 1)
    {
@@ -277,12 +282,6 @@ MultiPlanRouterExecutable(MultiPlan *multiPlan)
        return false;
    }

    /* FIXME: I tend to think it's time to remove this */
    if (TaskExecutorType != MULTI_EXECUTOR_REAL_TIME)
    {
        return false;
    }

    return true;
}
@@ -21,7 +21,6 @@
#include "distributed/multi_logical_planner.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/multi_router_planner.h"
#include "distributed/multi_server_executor.h"

#include "executor/executor.h"

@@ -129,7 +128,7 @@ CreatePhysicalPlan(Query *originalQuery, Query *query,
                   RelationRestrictionContext *restrictionContext)
{
    MultiPlan *physicalPlan = MultiRouterPlanCreate(originalQuery, query,
                                                    TaskExecutorType, restrictionContext);
                                                    restrictionContext);
    if (physicalPlan == NULL)
    {
        /* Create and optimize logical plan */
@@ -29,7 +29,6 @@
#include "distributed/multi_logical_planner.h"
#include "distributed/multi_logical_optimizer.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/multi_router_executor.h"
#include "distributed/multi_router_planner.h"
#include "distributed/listutils.h"
#include "distributed/citus_ruleutils.h"
@@ -69,6 +68,8 @@ typedef struct WalkerState
    bool badCoalesce;
} WalkerState;

bool EnableRouterExecution = true;

/* planner functions forward declarations */
static MultiPlan * CreateSingleTaskRouterPlan(Query *originalQuery, Query *query,
                                              RelationRestrictionContext *
@@ -112,7 +113,7 @@ static List * IntersectPlacementList(List *lhsPlacementList, List *rhsPlacementL
static bool UpdateRelationNames(Node *node,
                                RelationRestrictionContext *restrictionContext);
static Job * RouterQueryJob(Query *query, Task *task, List *placementList);
static bool MultiRouterPlannableQuery(Query *query, MultiExecutorType taskExecutorType,
static bool MultiRouterPlannableQuery(Query *query,
                                      RelationRestrictionContext *restrictionContext);
static RelationRestrictionContext * CopyRelationRestrictionContext(
    RelationRestrictionContext *oldContext);
@@ -142,13 +143,11 @@ static void AddUninstantiatedEqualityQual(Query *query, Var *targetPartitionColu
 */
MultiPlan *
MultiRouterPlanCreate(Query *originalQuery, Query *query,
                      MultiExecutorType taskExecutorType,
                      RelationRestrictionContext *restrictionContext)
{
    MultiPlan *multiPlan = NULL;

    bool routerPlannable = MultiRouterPlannableQuery(query, taskExecutorType,
                                                     restrictionContext);
    bool routerPlannable = MultiRouterPlannableQuery(query, restrictionContext);
    if (!routerPlannable)
    {
        return NULL;
@@ -2349,13 +2348,14 @@ RouterQueryJob(Query *query, Task *task, List *placementList)

/*
 * MultiRouterPlannableQuery returns true if given query can be router plannable.
 * The query is router plannable if it is a select query issued on a hash
 * partitioned distributed table, and it has a exact match comparison on the
 * partition column. This feature is enabled if task executor is set to real-time
 * The query is router plannable if it is a modify query, or if its is a select
 * query issued on a hash partitioned distributed table, and it has a filter
 * to reduce number of shard pairs to one, and all shard pairs are located on
 * the same node. Router plannable checks for select queries can be turned off
 * by setting citus.enable_router_execution flag to false.
 */
bool
MultiRouterPlannableQuery(Query *query, MultiExecutorType taskExecutorType,
                          RelationRestrictionContext *restrictionContext)
MultiRouterPlannableQuery(Query *query, RelationRestrictionContext *restrictionContext)
{
    CmdType commandType = query->commandType;
    ListCell *relationRestrictionContextCell = NULL;
@@ -2366,14 +2366,13 @@ MultiRouterPlannableQuery(Query *query, MultiExecutorType taskExecutorType,
        return true;
    }

    /* FIXME: I tend to think it's time to remove this */
    if (taskExecutorType != MULTI_EXECUTOR_REAL_TIME)
    Assert(commandType == CMD_SELECT);

    if (!EnableRouterExecution)
    {
        return false;
    }

    Assert(commandType == CMD_SELECT);

    if (query->hasForUpdate)
    {
        return false;
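To illustrate the rewritten comment above, two queries from the hash-pruning tests in this commit: the first prunes to a single shard on the partition column and is router plannable, while the second leaves more than one shard and falls back to the regular distributed execution path.

-- Router plannable: exact match on the partition column prunes to one shard.
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
-- Not router plannable: the OR filter still covers two shards.
SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 OR o_orderkey = 2;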
@@ -351,6 +351,16 @@ RegisterCitusConfigVariables(void)
        0,
        NULL, NULL, NULL);

    DefineCustomBoolVariable(
        "citus.enable_router_execution",
        gettext_noop("Enables router execution"),
        NULL,
        &EnableRouterExecution,
        true,
        PGC_USERSET,
        GUC_NO_SHOW_ALL,
        NULL, NULL, NULL);

    DefineCustomIntVariable(
        "citus.shard_count",
        gettext_noop("Sets the number of shards for a new hash-partitioned table"
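The GUC_NO_SHOW_ALL flag is what makes this the "hidden flag" mentioned in the commit message: the setting is omitted from SHOW ALL output. Assuming standard PostgreSQL GUC behavior, it can still be read and changed by name, as a sketch:

-- Hidden from SHOW ALL, but addressable explicitly.
SHOW citus.enable_router_execution;
SET citus.enable_router_execution TO off;
SELECT current_setting('citus.enable_router_execution');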
@@ -17,7 +17,6 @@
#include "distributed/multi_logical_planner.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/multi_planner.h"
#include "distributed/multi_server_executor.h"
#include "nodes/parsenodes.h"


@@ -27,9 +26,9 @@
/* reserved alias name for UPSERTs */
#define CITUS_TABLE_ALIAS "citus_table_alias"

extern bool EnableRouterExecution;

extern MultiPlan * MultiRouterPlanCreate(Query *originalQuery, Query *query,
                                         MultiExecutorType taskExecutorType,
                                         RelationRestrictionContext *restrictionContext);
extern void AddUninstantiatedPartitionRestriction(Query *originalQuery);
extern void ErrorIfModifyQueryNotSupported(Query *queryTree);
@@ -4,13 +4,6 @@
-- Tests for shard and join pruning logic on hash partitioned tables.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
-- Print the executor type for clarity in test output
SHOW citus.task_executor_type;
citus.task_executor_type
--------------------------
real-time
(1 row)

-- Create a table partitioned on integer column and update partition type to
-- hash. Then load data into this table and update shard min max values with
-- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
@@ -40,14 +33,6 @@ SELECT master_create_worker_shards('orders_hash_partitioned', 4, 1);
SET client_min_messages TO DEBUG2;
-- Check that we can prune shards for simple cases, boolean expressions and
-- immutable functions.
-- Since router plans are not triggered for task-tracker executor type,
-- we need to run the tests that triggers router planning seperately for
-- both executors. Otherwise, check-full fails on the task-tracker.
-- Later, we need to switch back to the actual task executor
-- to contuinue with correct executor type for check-full.
SELECT quote_literal(current_setting('citus.task_executor_type')) AS actual_task_executor
\gset
SET citus.task_executor_type TO 'real-time';
SELECT count(*) FROM orders_hash_partitioned;
count
-------
@@ -121,7 +106,8 @@ DEBUG: Plan is router executable
0
(1 row)

SET citus.task_executor_type TO 'task-tracker';
-- disable router planning
SET citus.enable_router_execution TO 'false';
SELECT count(*) FROM orders_hash_partitioned;
count
-------
@@ -183,7 +169,7 @@ DEBUG: predicate pruning for shardId 630003
0
(1 row)

SET citus.task_executor_type TO :actual_task_executor;
SET citus.enable_router_execution TO DEFAULT;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL;
count
-------
@@ -1,336 +0,0 @@
--
-- MULTI_HASH_PRUNING
--
-- Tests for shard and join pruning logic on hash partitioned tables.
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
-- Print the executor type for clarity in test output
SHOW citus.task_executor_type;
citus.task_executor_type
--------------------------
task-tracker
(1 row)

-- Create a table partitioned on integer column and update partition type to
-- hash. Then load data into this table and update shard min max values with
-- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
-- 1134484726, -28094569 and -1011077333.
CREATE TABLE orders_hash_partitioned (
o_orderkey integer,
o_custkey integer,
o_orderstatus char(1),
o_totalprice decimal(15,2),
o_orderdate date,
o_orderpriority char(15),
o_clerk char(15),
o_shippriority integer,
o_comment varchar(79) );
SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'hash');
master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('orders_hash_partitioned', 4, 1);
master_create_worker_shards
-----------------------------

(1 row)

SET client_min_messages TO DEBUG2;
-- Check that we can prune shards for simple cases, boolean expressions and
-- immutable functions.
-- Since router plans are not triggered for task-tracker executor type,
-- we need to run the tests that triggers router planning seperately for
-- both executors. Otherwise, check-full fails on the task-tracker.
-- Later, we need to switch back to the actual task executor
-- to contuinue with correct executor type for check-full.
SELECT quote_literal(current_setting('citus.task_executor_type')) AS actual_task_executor
\gset
SET citus.task_executor_type TO 'real-time';
SELECT count(*) FROM orders_hash_partitioned;
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
DEBUG: Creating router plan
DEBUG: Plan is router executable
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
DEBUG: predicate pruning for shardId 630000
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: Creating router plan
DEBUG: Plan is router executable
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3;
DEBUG: predicate pruning for shardId 630000
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
DEBUG: Creating router plan
DEBUG: Plan is router executable
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
DEBUG: predicate pruning for shardId 630000
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
DEBUG: Creating router plan
DEBUG: Plan is router executable
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 AND o_clerk = 'aaa';
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
DEBUG: Creating router plan
DEBUG: Plan is router executable
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
DEBUG: Creating router plan
DEBUG: Plan is router executable
count
-------
0
(1 row)

SET citus.task_executor_type TO 'task-tracker';
SELECT count(*) FROM orders_hash_partitioned;
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
DEBUG: predicate pruning for shardId 630000
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3;
DEBUG: predicate pruning for shardId 630000
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
DEBUG: predicate pruning for shardId 630000
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 AND o_clerk = 'aaa';
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

SET citus.task_executor_type TO :actual_task_executor;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL;
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is not NULL;
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2;
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 OR o_orderkey = 2;
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 OR o_clerk = 'aaa';
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa');
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 OR o_orderkey is NULL;
count
-------
0
(1 row)

SELECT count(*) FROM
(SELECT o_orderkey FROM orders_hash_partitioned WHERE o_orderkey = 1) AS orderkeys;
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

-- Check that we don't support pruning for ANY (array expression) and give
-- a notice message when used with the partition column
SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = ANY ('{1,2,3}');
NOTICE: cannot use shard pruning with ANY/ALL (array expression)
HINT: Consider rewriting the expression with OR/AND clauses.
count
-------
0
(1 row)

-- Check that we don't show the message if the operator is not
-- equality operator
SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey < ALL ('{1,2,3}');
count
-------
0
(1 row)

-- Check that we don't give a spurious hint message when non-partition
-- columns are used with ANY/IN/ALL
SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 OR o_totalprice IN (2, 5);
count
-------
0
(1 row)

-- Check that we cannot prune for mutable functions.
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random();
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = random() OR o_orderkey = 1;
count
-------
0
(1 row)

SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = random() AND o_orderkey = 1;
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
count
-------
0
(1 row)

-- Check that we can do join pruning.
SELECT count(*)
FROM orders_hash_partitioned orders1, orders_hash_partitioned orders2
WHERE orders1.o_orderkey = orders2.o_orderkey;
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [1073741824,2147483647]
DEBUG: join prunable for intervals [-1073741824,-1] and [-2147483648,-1073741825]
DEBUG: join prunable for intervals [-1073741824,-1] and [0,1073741823]
DEBUG: join prunable for intervals [-1073741824,-1] and [1073741824,2147483647]
DEBUG: join prunable for intervals [0,1073741823] and [-2147483648,-1073741825]
DEBUG: join prunable for intervals [0,1073741823] and [-1073741824,-1]
DEBUG: join prunable for intervals [0,1073741823] and [1073741824,2147483647]
DEBUG: join prunable for intervals [1073741824,2147483647] and [-2147483648,-1073741825]
DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1]
DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823]
count
-------
0
(1 row)

SELECT count(*)
FROM orders_hash_partitioned orders1, orders_hash_partitioned orders2
WHERE orders1.o_orderkey = orders2.o_orderkey
AND orders1.o_orderkey = 1
AND orders2.o_orderkey is NULL;
DEBUG: predicate pruning for shardId 630001
DEBUG: predicate pruning for shardId 630002
DEBUG: predicate pruning for shardId 630003
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823]
DEBUG: join prunable for intervals [-2147483648,-1073741825] and [1073741824,2147483647]
count
-------
0
(1 row)
@@ -1962,13 +1962,14 @@ NOTICE: cannot use shard pruning with ANY/ALL (array expression)
HINT: Consider rewriting the expression with OR/AND clauses.
ERROR: cannot create temporary table within security-restricted operation

-- router planner/executor is disabled for task-tracker executor
-- following query is router plannable, but router planner is disabled
-- router planner/executor is now enabled for task-tracker executor
SET citus.task_executor_type to 'task-tracker';
SELECT id
FROM articles_hash
WHERE author_id = 1;
DEBUG: predicate pruning for shardId 840001
DEBUG: Creating router plan
DEBUG: Plan is router executable
id
----
1
@@ -1987,6 +1988,8 @@ SELECT id
FROM articles_hash
WHERE author_id = 1;
DEBUG: predicate pruning for shardId 840001
DEBUG: Creating router plan
DEBUG: Plan is router executable
id
----
1
@@ -45,7 +45,7 @@ CREATE TABLE orders_subquery (
PRIMARY KEY(o_orderkey) );
SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range');

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_router_execution TO 'false';

-- Check that we don't allow subquery pushdown in default settings.

@@ -915,4 +915,4 @@ ORDER BY
LIMIT
10;

SET citusdb.task_executor_type TO 'real-time';
SET citus.enable_router_execution TO 'true';
@@ -53,7 +53,7 @@ SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range')

(1 row)

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_router_execution TO 'false';
-- Check that we don't allow subquery pushdown in default settings.
SELECT
avg(unit_price)
@@ -1056,4 +1056,4 @@ LIMIT
-> Seq Scan on pg_merge_job_270017 (cost=0.00..0.00 rows=0 width=0)
(29 rows)

SET citusdb.task_executor_type TO 'real-time';
SET citus.enable_router_execution TO 'true';
@@ -53,7 +53,7 @@ SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range')

(1 row)

SET citus.task_executor_type TO 'task-tracker';
SET citus.enable_router_execution TO 'false';
-- Check that we don't allow subquery pushdown in default settings.
SELECT
avg(unit_price)
@@ -1053,4 +1053,4 @@ LIMIT
-> Seq Scan on pg_merge_job_270017 (cost=0.00..0.00 rows=0 width=0)
(29 rows)

SET citusdb.task_executor_type TO 'real-time';
SET citus.enable_router_execution TO 'true';
@@ -8,9 +8,6 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;

-- Print the executor type for clarity in test output
SHOW citus.task_executor_type;

-- Create a table partitioned on integer column and update partition type to
-- hash. Then load data into this table and update shard min max values with
-- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
@@ -35,15 +32,6 @@ SET client_min_messages TO DEBUG2;
-- immutable functions.


-- Since router plans are not triggered for task-tracker executor type,
-- we need to run the tests that triggers router planning seperately for
-- both executors. Otherwise, check-full fails on the task-tracker.
-- Later, we need to switch back to the actual task executor
-- to contuinue with correct executor type for check-full.
SELECT quote_literal(current_setting('citus.task_executor_type')) AS actual_task_executor
\gset

SET citus.task_executor_type TO 'real-time';
SELECT count(*) FROM orders_hash_partitioned;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
@@ -53,8 +41,8 @@ SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 AND o_clerk = 'aaa';
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);


SET citus.task_executor_type TO 'task-tracker';
-- disable router planning
SET citus.enable_router_execution TO 'false';
SELECT count(*) FROM orders_hash_partitioned;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
@@ -64,8 +52,7 @@ SELECT count(*) FROM orders_hash_partitioned
WHERE o_orderkey = 1 AND o_clerk = 'aaa';
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);

SET citus.task_executor_type TO :actual_task_executor;

SET citus.enable_router_execution TO DEFAULT;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is not NULL;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2;
@@ -870,8 +870,7 @@ SELECT * FROM mv_articles_hash;
CREATE MATERIALIZED VIEW mv_articles_hash_error AS
SELECT * FROM articles_hash WHERE author_id in (1,2);

-- router planner/executor is disabled for task-tracker executor
-- following query is router plannable, but router planner is disabled
-- router planner/executor is now enabled for task-tracker executor
SET citus.task_executor_type to 'task-tracker';
SELECT id
FROM articles_hash