mirror of https://github.com/citusdata/citus.git
Remove broadcast join logic
After this change, all logic related to shard data fetching is removed. The planner no longer creates any ShardFetchTask, and the shard fetch steps in the real-time and task-tracker executors are gone.
pull/1915/head
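The heart of the executor change, condensed for review. This is a minimal sketch assembled from the ManageTaskExecution hunks below, not a compilable excerpt; the surrounding switch statement, variable declarations, and the rest of the EXEC_BEGIN_START path are elided:

    /* after this commit: a ready connection jumps straight to computation */
    if (pollStatus == CLIENT_CONNECTION_READY)
    {
        if (InCoordinatedTransaction())
        {
            /* coordinated transactions still go through the BEGIN state first */
            taskStatusArray[currentIndex] = EXEC_BEGIN_START;
        }
        else
        {
            /* previously EXEC_FETCH_TASK_LOOP; the fetch states no longer exist */
            taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_START;
        }
    }

The EXEC_FETCH_TASK_LOOP, EXEC_FETCH_TASK_START, and EXEC_FETCH_TASK_RUNNING states disappear from the state machine entirely, and the remaining executor states are renumbered (see the TaskExecStatus enum hunk near the end of this diff).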
parent 4582a4b398
commit 698d585fb5

Makefile
@@ -29,7 +29,6 @@ OBJS = src/backend/distributed/shared_library_init.o \
     src/backend/distributed/master/master_citus_tools.o \
     src/backend/distributed/master/master_create_shards.o \
     src/backend/distributed/master/master_delete_protocol.o \
-    src/backend/distributed/master/master_expire_table_cache.o \
     src/backend/distributed/master/master_metadata_utility.o \
     src/backend/distributed/master/master_modify_multiple_shards.o \
     src/backend/distributed/master/master_node_protocol.o \
Makefile
@@ -14,7 +14,8 @@ EXTVERSIONS = 5.0 5.0-1 5.0-2 \
     7.0-1 7.0-2 7.0-3 7.0-4 7.0-5 7.0-6 7.0-7 7.0-8 7.0-9 7.0-10 7.0-11 7.0-12 7.0-13 7.0-14 7.0-15 \
     7.1-1 7.1-2 7.1-3 7.1-4 \
     7.2-1 7.2-2 7.2-3 \
-    7.3-1 7.3-2 7.3-3
+    7.3-1 7.3-2 7.3-3 \
+    7.4-1
 
 # All citus--*.sql files in the source directory
 DATA = $(patsubst $(citus_abs_srcdir)/%.sql,%.sql,$(wildcard $(citus_abs_srcdir)/$(EXTENSION)--*--*.sql))
@@ -192,6 +193,8 @@ $(EXTENSION)--7.3-2.sql: $(EXTENSION)--7.3-1.sql $(EXTENSION)--7.3-1--7.3-2.sql
     cat $^ > $@
 $(EXTENSION)--7.3-3.sql: $(EXTENSION)--7.3-2.sql $(EXTENSION)--7.3-2--7.3-3.sql
     cat $^ > $@
+$(EXTENSION)--7.4-1.sql: $(EXTENSION)--7.3-3.sql $(EXTENSION)--7.3-3--7.4-1.sql
+    cat $^ > $@
 
 NO_PGXS = 1
 
@@ -0,0 +1,5 @@
+/* citus--7.3-3--7.4-1 */
+
+DROP FUNCTION IF EXISTS master_expire_table_cache(regclass);
+DROP FUNCTION IF EXISTS pg_catalog.worker_fetch_regular_table(text, bigint, text[], integer[]);
+DROP FUNCTION IF EXISTS pg_catalog.worker_fetch_foreign_file(text, bigint, text[], integer[]);
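The new upgrade script above only drops the three UDFs from the extension catalog. On an existing installation it is presumably applied with the standard ALTER EXTENSION citus UPDATE TO '7.4-1'; the C symbols behind the dropped functions survive as the stub definitions added near the end of this diff, so older citus--*.sql scripts that still reference them continue to install cleanly.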
@@ -1,6 +1,6 @@
 # Citus extension
 comment = 'Citus distributed database'
-default_version = '7.3-3'
+default_version = '7.4-1'
 module_pathname = '$libdir/citus'
 relocatable = false
 schema = pg_catalog
@@ -331,20 +331,18 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution,
             ConnectStatus pollStatus = MultiClientConnectPoll(connectionId);
 
             /*
-             * If the connection is established, we reset the data fetch counter and
-             * change our status to data fetching.
+             * If the connection is established, we change our state based on
+             * whether a coordinated transaction has been started.
              */
             if (pollStatus == CLIENT_CONNECTION_READY)
             {
-                taskExecution->dataFetchTaskIndex = -1;
-
                 if (InCoordinatedTransaction())
                 {
                     taskStatusArray[currentIndex] = EXEC_BEGIN_START;
                 }
                 else
                 {
-                    taskStatusArray[currentIndex] = EXEC_FETCH_TASK_LOOP;
+                    taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_START;
                 }
             }
             else if (pollStatus == CLIENT_CONNECTION_BUSY)
@@ -393,8 +391,8 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution,
             /*
              * On task failure, we close the connection. We also reset our execution
              * status assuming that we might fail on all other worker nodes and come
-             * back to this failed node. In that case, we will retry the same fetch
-             * and compute task(s) on this node again.
+             * back to this failed node. In that case, we will retry compute task(s)
+             * on this node again.
              */
             int32 connectionId = connectionIdArray[currentIndex];
             MultiConnection *connection = MultiClientGetConnection(connectionId);
@@ -453,11 +451,6 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution,
             }
             else
             {
-                /*
-                 * We skip data fetches when in a distributed transaction since
-                 * they cannot be performed in a transactional way (e.g. would
-                 * trigger deadlock detection).
-                 */
                 taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_START;
                 break;
             }
@@ -493,101 +486,11 @@ ManageTaskExecution(Task *task, TaskExecution *taskExecution,
             }
             else
             {
-                /*
-                 * We skip data fetches when in a distributed transaction since
-                 * they cannot be performed in a transactional way (e.g. would
-                 * trigger deadlock detection).
-                 */
                 taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_START;
                 break;
             }
         }
-
-        case EXEC_FETCH_TASK_LOOP:
-        {
-            List *dataFetchTaskList = task->dependedTaskList;
-            int32 dataFetchTaskCount = list_length(dataFetchTaskList);
-
-            /* move to the next data fetch task */
-            taskExecution->dataFetchTaskIndex++;
-
-            if (taskExecution->dataFetchTaskIndex < dataFetchTaskCount)
-            {
-                taskStatusArray[currentIndex] = EXEC_FETCH_TASK_START;
-            }
-            else
-            {
-                taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_START;
-            }
-
-            break;
-        }
-
-        case EXEC_FETCH_TASK_START:
-        {
-            List *dataFetchTaskList = task->dependedTaskList;
-            int32 dataFetchTaskIndex = taskExecution->dataFetchTaskIndex;
-            Task *dataFetchTask = (Task *) list_nth(dataFetchTaskList,
-                                                    dataFetchTaskIndex);
-
-            char *dataFetchQuery = dataFetchTask->queryString;
-            int32 connectionId = connectionIdArray[currentIndex];
-
-            bool querySent = MultiClientSendQuery(connectionId, dataFetchQuery);
-            if (querySent)
-            {
-                taskStatusArray[currentIndex] = EXEC_FETCH_TASK_RUNNING;
-            }
-            else
-            {
-                taskStatusArray[currentIndex] = EXEC_TASK_FAILED;
-            }
-
-            break;
-        }
-
-        case EXEC_FETCH_TASK_RUNNING:
-        {
-            int32 connectionId = connectionIdArray[currentIndex];
-            ResultStatus resultStatus = MultiClientResultStatus(connectionId);
-            QueryStatus queryStatus = CLIENT_INVALID_QUERY;
-
-            /* check if query results are in progress or unavailable */
-            if (resultStatus == CLIENT_RESULT_BUSY)
-            {
-                *executionStatus = TASK_STATUS_SOCKET_READ;
-                taskStatusArray[currentIndex] = EXEC_FETCH_TASK_RUNNING;
-                break;
-            }
-            else if (resultStatus == CLIENT_RESULT_UNAVAILABLE)
-            {
-                taskStatusArray[currentIndex] = EXEC_TASK_FAILED;
-                break;
-            }
-
-            Assert(resultStatus == CLIENT_RESULT_READY);
-
-            /*
-             * If the query executed successfully, loop onto the next data fetch
-             * task. Else if the query failed, try data fetching on another node.
-             */
-            queryStatus = MultiClientQueryStatus(connectionId);
-            if (queryStatus == CLIENT_QUERY_DONE)
-            {
-                taskStatusArray[currentIndex] = EXEC_FETCH_TASK_LOOP;
-            }
-            else if (queryStatus == CLIENT_QUERY_FAILED)
-            {
-                taskStatusArray[currentIndex] = EXEC_TASK_FAILED;
-            }
-            else
-            {
-                ereport(FATAL, (errmsg("invalid query status: %d", queryStatus)));
-            }
-
-            break;
-        }
-
 
         case EXEC_COMPUTE_TASK_START:
         {
             int32 connectionId = connectionIdArray[currentIndex];
@@ -827,7 +730,6 @@ CancelRequestIfActive(TaskExecStatus taskStatus, int connectionId)
     /*
      * We use the task status to determine if we have an active request being
      * processed by the worker node. If we do, we send a cancellation request.
-     * Note that we don't cancel data fetch tasks, and allow them to complete.
      */
     if (taskStatus == EXEC_COMPUTE_TASK_RUNNING)
     {
@@ -182,7 +182,6 @@ InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus)
     taskExecution->nodeCount = nodeCount;
     taskExecution->connectStartTime = 0;
     taskExecution->currentNodeIndex = 0;
-    taskExecution->dataFetchTaskIndex = -1;
     taskExecution->failureCount = 0;
 
     taskExecution->taskStatusArray = palloc0(nodeCount * sizeof(TaskExecStatus));
@@ -283,7 +282,6 @@ AdjustStateForFailure(TaskExecution *taskExecution)
         taskExecution->currentNodeIndex = 0; /* go back to the first worker node */
     }
 
-    taskExecution->dataFetchTaskIndex = -1; /* reset data fetch counter */
     taskExecution->failureCount++; /* record failure */
 }
 
@@ -121,7 +121,6 @@ static List * ConstrainedMergeTaskList(List *taskAndExecutionList, Task *task);
 static List * MergeTaskList(List *taskList);
 static void ReassignTaskList(List *taskList);
 static void ReassignMapFetchTaskList(List *mapFetchTaskList);
-static List * ShardFetchTaskList(List *taskList);
 
 /* Local functions forward declarations to manage task trackers */
 static void ManageTaskTracker(TaskTracker *taskTracker);
@@ -1839,9 +1838,9 @@ TransmitTrackerConnectionId(TaskTracker *transmitTracker, Task *task)
  * ConstrainedTaskList finds the given task's constraint group within the given
  * task and execution list. We define a constraint group as all tasks that need
  * to be assigned (or reassigned) to the same task tracker for query execution
- * to complete. At a high level, compute tasks and their data fetch dependencies
- * are part of the same constraint group. Also, the transitive closure of tasks
- * that have the same merge task dependency are part of one constraint group.
+ * to complete. At a high level, compute tasks are part of the same constraint
+ * group. Also, the transitive closure of tasks that have the same merge task
+ * dependency are part of one constraint group.
  */
 static List *
 ConstrainedTaskList(List *taskAndExecutionList, Task *task)
@@ -1907,8 +1906,7 @@ ConstrainedTaskList(List *taskAndExecutionList, Task *task)
 /*
  * ConstrainedNonMergeTaskList finds the constraint group for the given task,
  * assuming that the given task doesn't have any merge task dependencies. This
- * constraint group includes a compute task and its downstream data fetch task
- * dependencies.
+ * constraint group includes compute task.
  */
 static List *
 ConstrainedNonMergeTaskList(List *taskAndExecutionList, Task *task)
@@ -1923,14 +1921,6 @@ ConstrainedNonMergeTaskList(List *taskAndExecutionList, Task *task)
         upstreamTask = task;
         dependedTaskList = upstreamTask->dependedTaskList;
     }
-    else if (taskType == SHARD_FETCH_TASK)
-    {
-        List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, task);
-        Assert(list_length(upstreamTaskList) == 1);
-
-        upstreamTask = (Task *) linitial(upstreamTaskList);
-        dependedTaskList = upstreamTask->dependedTaskList;
-    }
     Assert(upstreamTask != NULL);
 
     constrainedTaskList = list_make1(upstreamTask);
@@ -2008,20 +1998,6 @@ ConstrainedMergeTaskList(List *taskAndExecutionList, Task *task)
     {
         constrainedMergeTaskList = MergeTaskList(task->dependedTaskList);
     }
-    else if (taskType == SHARD_FETCH_TASK)
-    {
-        Task *upstreamTask = NULL;
-        List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, task);
-
-        /*
-         * A shard fetch task can only have one SQL/map task parent. We now get
-         * that parent. From the parent, we find any merge task dependencies.
-         */
-        Assert(list_length(upstreamTaskList) == 1);
-        upstreamTask = (Task *) linitial(upstreamTaskList);
-
-        constrainedMergeTaskList = MergeTaskList(upstreamTask->dependedTaskList);
-    }
     else if (taskType == MAP_OUTPUT_FETCH_TASK)
     {
         List *taskList = UpstreamDependencyList(taskAndExecutionList, task);
@@ -2093,8 +2069,7 @@ ReassignTaskList(List *taskList)
 
     /*
      * As an optimization, we first find the SQL tasks whose results we already
-     * fetched to the master node. We don't need to re-execute these SQL tasks
-     * or their shard fetch dependencies.
+     * fetched to the master node. We don't need to re-execute these SQL tasks.
      */
     foreach(taskCell, taskList)
     {
@@ -2104,10 +2079,7 @@ ReassignTaskList(List *taskList)
         bool transmitCompleted = TransmitExecutionCompleted(taskExecution);
         if ((task->taskType == SQL_TASK) && transmitCompleted)
         {
-            List *shardFetchTaskList = ShardFetchTaskList(task->dependedTaskList);
-
             completedTaskList = lappend(completedTaskList, task);
-            completedTaskList = TaskListUnion(completedTaskList, shardFetchTaskList);
         }
     }
 
@@ -2162,29 +2134,6 @@ ReassignMapFetchTaskList(List *mapFetchTaskList)
 }
 
 
-/*
- * ShardFetchTaskList walks over the given task list, finds the shard fetch tasks
- * in the list, and returns the found tasks in a new list.
- */
-static List *
-ShardFetchTaskList(List *taskList)
-{
-    List *shardFetchTaskList = NIL;
-    ListCell *taskCell = NULL;
-
-    foreach(taskCell, taskList)
-    {
-        Task *task = (Task *) lfirst(taskCell);
-        if (task->taskType == SHARD_FETCH_TASK)
-        {
-            shardFetchTaskList = lappend(shardFetchTaskList, task);
-        }
-    }
-
-    return shardFetchTaskList;
-}
-
-
 /*
  * ManageTaskTracker manages tasks assigned to the given task tracker. For this,
  * the function coordinates access to the underlying connection. The function
@@ -1,204 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * master_expire_table_cache.c
- *    UDF to refresh shard cache at workers
- *
- * This file contains master_expire_table_cache function. The function
- * accepts a table name and drops tables cached shards from all workers.
- * It does not change existing shard placement. Only drops cached copies
- * of shards.
- *
- * Copyright (c) 2012-2016, Citus Data, Inc.
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-#include "funcapi.h"
-#include "libpq-fe.h"
-
-#include "catalog/pg_class.h"
-#include "distributed/connection_management.h"
-#include "distributed/master_protocol.h"
-#include "distributed/metadata_cache.h"
-#include "distributed/multi_join_order.h"
-#include "distributed/pg_dist_shard.h"
-#include "distributed/remote_commands.h"
-#include "distributed/worker_manager.h"
-#include "distributed/worker_protocol.h"
-#include "utils/builtins.h"
-#include "utils/lsyscache.h"
-
-
-static List * FindAbsentShardPlacementsOnWorker(WorkerNode *workerNode,
-                                                ShardInterval **shardIntervalArray,
-                                                List **placementListArray,
-                                                int shardCount);
-static void DropShardsFromWorker(WorkerNode *workerNode, Oid relationId,
-                                 List *shardIntervalList);
-
-PG_FUNCTION_INFO_V1(master_expire_table_cache);
-
-
-/*
- * master_expire_table_cache drops table's caches shards in all workers. The function
- * expects a passed table to be a small distributed table meaning it has less than
- * large_table_shard_count.
- */
-Datum
-master_expire_table_cache(PG_FUNCTION_ARGS)
-{
-    Oid relationId = PG_GETARG_OID(0);
-    DistTableCacheEntry *cacheEntry = NULL;
-    List *workerNodeList = NIL;
-    ListCell *workerNodeCell = NULL;
-    int shardCount = 0;
-    ShardInterval **shardIntervalArray = NULL;
-    List **placementListArray = NULL;
-    int shardIndex = 0;
-
-    CheckCitusVersion(ERROR);
-
-    cacheEntry = DistributedTableCacheEntry(relationId);
-    workerNodeList = ActivePrimaryNodeList();
-    shardCount = cacheEntry->shardIntervalArrayLength;
-    shardIntervalArray = cacheEntry->sortedShardIntervalArray;
-
-    if (shardCount == 0)
-    {
-        ereport(WARNING, (errmsg("Table has no shards, no action is taken")));
-        PG_RETURN_VOID();
-    }
-
-    if (shardCount >= LargeTableShardCount)
-    {
-        ereport(ERROR, (errmsg("Must be called on tables smaller than %d shards",
-                               LargeTableShardCount)));
-    }
-
-    placementListArray = palloc(shardCount * sizeof(List *));
-
-    for (shardIndex = 0; shardIndex < shardCount; shardIndex++)
-    {
-        ShardInterval *shardInterval = shardIntervalArray[shardIndex];
-        placementListArray[shardIndex] =
-            FinalizedShardPlacementList(shardInterval->shardId);
-    }
-
-    foreach(workerNodeCell, workerNodeList)
-    {
-        WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell);
-        List *shardDropList = FindAbsentShardPlacementsOnWorker(workerNode,
-                                                                shardIntervalArray,
-                                                                placementListArray,
-                                                                shardCount);
-        DropShardsFromWorker(workerNode, relationId, shardDropList);
-    }
-
-    pfree(placementListArray);
-
-    PG_RETURN_VOID();
-}
-
-
-/*
- * FindAbsentShardPlacementsOnWorker compiles shard interval list of shards
- * that do not have registered placement at given worker node.
- */
-List *
-FindAbsentShardPlacementsOnWorker(WorkerNode *workerNode,
-                                  ShardInterval **shardIntervalArray,
-                                  List **placementListArray, int shardCount)
-{
-    List *absentShardIntervalList = NIL;
-
-    int shardIndex = 0;
-    for (shardIndex = 0; shardIndex < shardCount; shardIndex++)
-    {
-        ShardInterval *shardInterval = shardIntervalArray[shardIndex];
-        List *placementList = placementListArray[shardIndex];
-
-        ListCell *placementCell = NULL;
-        foreach(placementCell, placementList)
-        {
-            ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell);
-
-            /*
-             * Append shard interval to absent list if none of its placements is on
-             * the worker.
-             */
-            if (placement->nodePort == workerNode->workerPort &&
-                strncmp(placement->nodeName, workerNode->workerName, WORKER_LENGTH) == 0)
-            {
-                break;
-            }
-            else if (lnext(placementCell) == NULL)
-            {
-                absentShardIntervalList = lappend(absentShardIntervalList, shardInterval);
-            }
-        }
-    }
-
-    return absentShardIntervalList;
-}
-
-
-/*
- * DropShardsFromWorker drops provided shards belonging to a relation from
- * given worker. It does not change any metadata at the master.
- */
-static void
-DropShardsFromWorker(WorkerNode *workerNode, Oid relationId, List *shardIntervalList)
-{
-    Oid schemaId = get_rel_namespace(relationId);
-    char *schemaName = get_namespace_name(schemaId);
-    char *relationName = get_rel_name(relationId);
-    char relationKind = get_rel_relkind(relationId);
-    StringInfo workerCommand = makeStringInfo();
-    StringInfo shardNames = makeStringInfo();
-    ListCell *shardIntervalCell = NULL;
-    MultiConnection *connection = NULL;
-    int connectionFlag = FORCE_NEW_CONNECTION;
-    PGresult *result = NULL;
-
-    if (shardIntervalList == NIL)
-    {
-        return;
-    }
-
-    foreach(shardIntervalCell, shardIntervalList)
-    {
-        ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell);
-        char *shardName = pstrdup(relationName);
-        char *quotedShardName = NULL;
-
-        AppendShardIdToName(&shardName, shardInterval->shardId);
-        quotedShardName = quote_qualified_identifier(schemaName, shardName);
-        appendStringInfo(shardNames, "%s", quotedShardName);
-
-        /* append a comma after the shard name if there are more shards */
-        if (lnext(shardIntervalCell) != NULL)
-        {
-            appendStringInfo(shardNames, ", ");
-        }
-    }
-
-    if (RegularTable(relationId))
-    {
-        appendStringInfo(workerCommand, DROP_REGULAR_TABLE_COMMAND, shardNames->data);
-    }
-    else if (relationKind == RELKIND_FOREIGN_TABLE)
-    {
-        appendStringInfo(workerCommand, DROP_FOREIGN_TABLE_COMMAND, shardNames->data);
-    }
-    else
-    {
-        ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                        errmsg("expire target is not a regular, foreign or partitioned "
-                               "table")));
-    }
-
-    connection = GetNodeConnection(connectionFlag, workerNode->workerName,
-                                   workerNode->workerPort);
-    ExecuteOptionalRemoteCommand(connection, workerCommand->data, &result);
-}
@@ -157,8 +157,6 @@ static bool JoinPrunable(RangeTableFragment *leftFragment,
 static ShardInterval * FragmentInterval(RangeTableFragment *fragment);
 static StringInfo FragmentIntervalString(ShardInterval *fragmentInterval);
 static List * DataFetchTaskList(uint64 jobId, uint32 taskIdIndex, List *fragmentList);
-static StringInfo NodeNameArrayString(List *workerNodeList);
-static StringInfo NodePortArrayString(List *workerNodeList);
 static StringInfo DatumArrayString(Datum *datumArray, uint32 datumCount, Oid datumTypeId);
 static List * BuildRelationShardList(List *rangeTableList, List *fragmentList);
 static void UpdateRangeTableAlias(List *rangeTableList, List *fragmentList);
@@ -3759,8 +3757,9 @@ FragmentIntervalString(ShardInterval *fragmentInterval)
 
 
 /*
- * DataFetchTaskList builds a data fetch task for every shard in the given shard
- * list, appends these data fetch tasks into a list, and returns this list.
+ * DataFetchTaskList builds a merge fetch task for every remote query result
+ * in the given fragment list, appends these merge fetch tasks into a list,
+ * and returns this list.
  */
 static List *
 DataFetchTaskList(uint64 jobId, uint32 taskIdIndex, List *fragmentList)
@@ -3771,20 +3770,7 @@ DataFetchTaskList(uint64 jobId, uint32 taskIdIndex, List *fragmentList)
     foreach(fragmentCell, fragmentList)
     {
         RangeTableFragment *fragment = (RangeTableFragment *) lfirst(fragmentCell);
-        if (fragment->fragmentType == CITUS_RTE_RELATION)
-        {
-            ShardInterval *shardInterval = fragment->fragmentReference;
-            uint64 shardId = shardInterval->shardId;
-            StringInfo shardFetchQueryString = ShardFetchQueryString(shardId);
-
-            Task *shardFetchTask = CreateBasicTask(jobId, taskIdIndex, SHARD_FETCH_TASK,
-                                                   shardFetchQueryString->data);
-            shardFetchTask->shardId = shardId;
-
-            dataFetchTaskList = lappend(dataFetchTaskList, shardFetchTask);
-            taskIdIndex++;
-        }
-        else if (fragment->fragmentType == CITUS_RTE_REMOTE_QUERY)
+        if (fragment->fragmentType == CITUS_RTE_REMOTE_QUERY)
         {
             Task *mergeTask = (Task *) fragment->fragmentReference;
             char *undefinedQueryString = NULL;
@@ -3803,136 +3789,6 @@ DataFetchTaskList(uint64 jobId, uint32 taskIdIndex, List *fragmentList)
 }
 
 
-/*
- * ShardFetchQueryString constructs a query string to fetch the given shard from
- * the shards' placements.
- */
-StringInfo
-ShardFetchQueryString(uint64 shardId)
-{
-    StringInfo shardFetchQuery = NULL;
-    uint64 shardLength = ShardLength(shardId);
-
-    /* construct two array strings for node names and port numbers */
-    List *shardPlacements = FinalizedShardPlacementList(shardId);
-    StringInfo nodeNameArrayString = NodeNameArrayString(shardPlacements);
-    StringInfo nodePortArrayString = NodePortArrayString(shardPlacements);
-
-    /* check storage type to create the correct query string */
-    ShardInterval *shardInterval = LoadShardInterval(shardId);
-    char storageType = shardInterval->storageType;
-    char *shardSchemaName = NULL;
-    char *shardTableName = NULL;
-
-    /* construct the shard name */
-    Oid shardSchemaId = get_rel_namespace(shardInterval->relationId);
-    char *tableName = get_rel_name(shardInterval->relationId);
-
-    shardSchemaName = get_namespace_name(shardSchemaId);
-    shardTableName = pstrdup(tableName);
-    AppendShardIdToName(&shardTableName, shardId);
-
-    shardFetchQuery = makeStringInfo();
-    if (storageType == SHARD_STORAGE_TABLE || storageType == SHARD_STORAGE_RELAY ||
-        storageType == SHARD_STORAGE_COLUMNAR)
-    {
-        if (strcmp(shardSchemaName, "public") != 0)
-        {
-            char *qualifiedTableName = quote_qualified_identifier(shardSchemaName,
-                                                                  shardTableName);
-
-            appendStringInfo(shardFetchQuery, TABLE_FETCH_COMMAND, qualifiedTableName,
-                             shardLength, nodeNameArrayString->data,
-                             nodePortArrayString->data);
-        }
-        else
-        {
-            appendStringInfo(shardFetchQuery, TABLE_FETCH_COMMAND, shardTableName,
-                             shardLength, nodeNameArrayString->data,
-                             nodePortArrayString->data);
-        }
-    }
-    else if (storageType == SHARD_STORAGE_FOREIGN)
-    {
-        if (strcmp(shardSchemaName, "public") != 0)
-        {
-            char *qualifiedTableName = quote_qualified_identifier(shardSchemaName,
-                                                                  shardTableName);
-
-            appendStringInfo(shardFetchQuery, FOREIGN_FETCH_COMMAND, qualifiedTableName,
-                             shardLength, nodeNameArrayString->data,
-                             nodePortArrayString->data);
-        }
-        else
-        {
-            appendStringInfo(shardFetchQuery, FOREIGN_FETCH_COMMAND, shardTableName,
-                             shardLength, nodeNameArrayString->data,
-                             nodePortArrayString->data);
-        }
-    }
-
-    return shardFetchQuery;
-}
-
-
-/*
- * NodeNameArrayString extracts the node names from the given node list, stores
- * these node names in an array, and returns the array's string representation.
- */
-static StringInfo
-NodeNameArrayString(List *shardPlacementList)
-{
-    StringInfo nodeNameArrayString = NULL;
-    ListCell *shardPlacementCell = NULL;
-
-    uint32 nodeNameCount = (uint32) list_length(shardPlacementList);
-    Datum *nodeNameArray = palloc0(nodeNameCount * sizeof(Datum));
-    uint32 nodeNameIndex = 0;
-
-    foreach(shardPlacementCell, shardPlacementList)
-    {
-        ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell);
-        Datum nodeName = CStringGetDatum(shardPlacement->nodeName);
-
-        nodeNameArray[nodeNameIndex] = nodeName;
-        nodeNameIndex++;
-    }
-
-    nodeNameArrayString = DatumArrayString(nodeNameArray, nodeNameCount, CSTRINGOID);
-
-    return nodeNameArrayString;
-}
-
-
-/*
- * NodePortArrayString extracts the node ports from the given node list, stores
- * these node ports in an array, and returns the array's string representation.
- */
-static StringInfo
-NodePortArrayString(List *shardPlacementList)
-{
-    StringInfo nodePortArrayString = NULL;
-    ListCell *shardPlacementCell = NULL;
-
-    uint32 nodePortCount = (uint32) list_length(shardPlacementList);
-    Datum *nodePortArray = palloc0(nodePortCount * sizeof(Datum));
-    uint32 nodePortIndex = 0;
-
-    foreach(shardPlacementCell, shardPlacementList)
-    {
-        ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell);
-        Datum nodePort = UInt32GetDatum(shardPlacement->nodePort);
-
-        nodePortArray[nodePortIndex] = nodePort;
-        nodePortIndex++;
-    }
-
-    nodePortArrayString = DatumArrayString(nodePortArray, nodePortCount, INT4OID);
-
-    return nodePortArrayString;
-}
-
-
 /* Helper function to return a datum array's external string representation. */
 static StringInfo
 DatumArrayString(Datum *datumArray, uint32 datumCount, Oid datumTypeId)
@@ -4153,10 +4009,8 @@ AnchorShardId(List *fragmentList, uint32 anchorRangeTableId)
 
 /*
  * PruneSqlTaskDependencies iterates over each sql task from the given sql task
- * list, and prunes away any data fetch tasks which are redundant or not needed
- * for the completion of that task. Specifically the function prunes away data
- * fetch tasks for the anchor shard and any merge-fetch tasks, as the task
- * assignment algorithm ensures co-location of these tasks.
+ * list, and prunes away merge-fetch tasks, as the task assignment algorithm
+ * ensures co-location of these tasks.
  */
 static List *
 PruneSqlTaskDependencies(List *sqlTaskList)
@@ -4174,17 +4028,11 @@ PruneSqlTaskDependencies(List *sqlTaskList)
             Task *dataFetchTask = (Task *) lfirst(dependedTaskCell);
 
             /*
-             * If we have a shard fetch task for the anchor shard, or if we have
-             * a merge fetch task, our task assignment algorithm makes sure that
-             * the sql task is colocated with the anchor shard / merge task. We
-             * can therefore prune out this data fetch task.
+             * If we have a merge fetch task, our task assignment algorithm makes
+             * sure that the sql task is colocated with the anchor shard / merge
+             * task. We can therefore prune out this data fetch task.
              */
-            if (dataFetchTask->taskType == SHARD_FETCH_TASK &&
-                dataFetchTask->shardId != sqlTask->anchorShardId)
-            {
-                prunedDependedTaskList = lappend(prunedDependedTaskList, dataFetchTask);
-            }
-            else if (dataFetchTask->taskType == MERGE_FETCH_TASK)
+            if (dataFetchTask->taskType == MERGE_FETCH_TASK)
             {
                 Task *mergeTaskReference = NULL;
                 List *mergeFetchDependencyList = dataFetchTask->dependedTaskList;
@@ -4827,31 +4675,6 @@ TaskListDifference(const List *list1, const List *list2)
 }
 
 
-/*
- * TaskListUnion generate the union of two tasks lists. This is calculated by
- * copying list1 via list_copy(), then adding to it all the members of list2
- * that aren't already in list1.
- */
-List *
-TaskListUnion(const List *list1, const List *list2)
-{
-    const ListCell *taskCell = NULL;
-    List *resultList = NIL;
-
-    resultList = list_copy(list1);
-
-    foreach(taskCell, list2)
-    {
-        if (!TaskListMember(resultList, lfirst(taskCell)))
-        {
-            resultList = lappend(resultList, lfirst(taskCell));
-        }
-    }
-
-    return resultList;
-}
-
-
 /*
  * AssignAnchorShardTaskList assigns locations to the given tasks based on the
  * configured task assignment policy. The distributed executor later sends these
@@ -5409,8 +5232,7 @@ AssignDataFetchDependencies(List *taskList)
         foreach(dependedTaskCell, dependedTaskList)
         {
             Task *dependedTask = (Task *) lfirst(dependedTaskCell);
-            if (dependedTask->taskType == SHARD_FETCH_TASK ||
-                dependedTask->taskType == MAP_OUTPUT_FETCH_TASK)
+            if (dependedTask->taskType == MAP_OUTPUT_FETCH_TASK)
             {
                 dependedTask->taskPlacementList = task->taskPlacementList;
             }
@@ -1351,7 +1351,6 @@ CreateTask(TaskType taskType)
     task->upstreamTaskId = INVALID_TASK_ID;
     task->shardInterval = NULL;
     task->assignmentConstrained = false;
-    task->shardId = INVALID_SHARD_ID;
     task->taskExecution = NULL;
     task->upsertQuery = false;
     task->replicationModel = REPLICATION_MODEL_INVALID;
@@ -69,6 +69,9 @@ static void NormalizeWorkerListPath(void);
 static bool StatisticsCollectionGucCheckHook(bool *newval, void **extra, GucSource
                                              source);
 
+/* static variable to hold value of deprecated GUC variable */
+static bool ExpireCachedShards = false;
+
 
 /* *INDENT-OFF* */
 /* GUC enum definitions */
@@ -360,11 +363,8 @@ RegisterCitusConfigVariables(void)
 
     DefineCustomBoolVariable(
         "citus.expire_cached_shards",
-        gettext_noop("Enables shard cache expiration if a shard's size on disk has "
-                     "changed."),
-        gettext_noop("When appending to an existing shard, old data may still be cached "
-                     "on other workers. This configuration entry activates automatic "
-                     "expiration, but should not be used with manual updates to shards."),
+        gettext_noop("This GUC variable has been deprecated."),
+        NULL,
         &ExpireCachedShards,
         false,
         PGC_SIGHUP,
@@ -241,7 +241,6 @@ CopyNodeTask(COPYFUNC_ARGS)
     COPY_SCALAR_FIELD(upstreamTaskId);
     COPY_NODE_FIELD(shardInterval);
     COPY_SCALAR_FIELD(assignmentConstrained);
-    COPY_SCALAR_FIELD(shardId);
     COPY_NODE_FIELD(taskExecution);
     COPY_SCALAR_FIELD(upsertQuery);
     COPY_SCALAR_FIELD(replicationModel);
@@ -268,7 +267,6 @@ CopyNodeTaskExecution(COPYFUNC_ARGS)
     COPY_SCALAR_FIELD(connectStartTime);
     COPY_SCALAR_FIELD(currentNodeIndex);
     COPY_SCALAR_FIELD(querySourceNodeIndex);
-    COPY_SCALAR_FIELD(dataFetchTaskIndex);
     COPY_SCALAR_FIELD(failureCount);
 }
 
@@ -477,7 +477,6 @@ OutTaskExecution(OUTFUNC_ARGS)
     WRITE_INT64_FIELD(connectStartTime);
     WRITE_UINT_FIELD(currentNodeIndex);
     WRITE_UINT_FIELD(querySourceNodeIndex);
-    WRITE_INT_FIELD(dataFetchTaskIndex);
     WRITE_UINT_FIELD(failureCount);
 }
 
@@ -52,10 +52,6 @@
 #endif
 
 
-/* Config variable managed via guc.c */
-bool ExpireCachedShards = false;
-
-
 /* Local functions forward declarations */
 static void FetchRegularFileAsSuperUser(const char *nodeName, uint32 nodePort,
                                         StringInfo remoteFilename,
@@ -66,20 +62,7 @@ static bool ReceiveRegularFile(const char *nodeName, uint32 nodePort,
 static void ReceiveResourceCleanup(int32 connectionId, const char *filename,
                                    int32 fileDescriptor);
 static void CitusDeleteFile(const char *filename);
-static void FetchTableCommon(text *tableName, uint64 remoteTableSize,
-                             ArrayType *nodeNameObject, ArrayType *nodePortObject,
-                             bool (*FetchTableFunction)(const char *, uint32,
-                                                        const char *));
-static uint64 LocalTableSize(Oid relationId);
 static uint64 ExtractShardId(const char *tableName);
-static bool FetchRegularTable(const char *nodeName, uint32 nodePort,
-                              const char *tableName);
-static bool FetchForeignTable(const char *nodeName, uint32 nodePort,
-                              const char *tableName);
-static const char * RemoteTableOwner(const char *nodeName, uint32 nodePort,
-                                     const char *tableName);
-static StringInfo ForeignFilePath(const char *nodeName, uint32 nodePort,
-                                  const char *tableName);
 static bool check_log_statement(List *stmt_list);
 static void AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName);
 static void SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg);
@@ -91,9 +74,15 @@ PG_FUNCTION_INFO_V1(worker_fetch_query_results_file);
 PG_FUNCTION_INFO_V1(worker_apply_shard_ddl_command);
 PG_FUNCTION_INFO_V1(worker_apply_inter_shard_ddl_command);
 PG_FUNCTION_INFO_V1(worker_apply_sequence_command);
+PG_FUNCTION_INFO_V1(worker_append_table_to_shard);
+
+/*
+ * Following UDFs are stub functions, you can check their comments for more
+ * detail.
+ */
 PG_FUNCTION_INFO_V1(worker_fetch_regular_table);
 PG_FUNCTION_INFO_V1(worker_fetch_foreign_file);
-PG_FUNCTION_INFO_V1(worker_append_table_to_shard);
+PG_FUNCTION_INFO_V1(master_expire_table_cache);
 
 
 /*
@@ -537,236 +526,6 @@ worker_apply_sequence_command(PG_FUNCTION_ARGS)
 }
 
 
-/*
- * worker_fetch_regular_table caches the given PostgreSQL table on the local
- * node. The function caches this table by trying the given list of node names
- * and node ports in sequential order. On success, the function simply returns.
- */
-Datum
-worker_fetch_regular_table(PG_FUNCTION_ARGS)
-{
-    text *regularTableName = PG_GETARG_TEXT_P(0);
-    uint64 generationStamp = PG_GETARG_INT64(1);
-    ArrayType *nodeNameObject = PG_GETARG_ARRAYTYPE_P(2);
-    ArrayType *nodePortObject = PG_GETARG_ARRAYTYPE_P(3);
-
-    CheckCitusVersion(ERROR);
-
-    /*
-     * Run common logic to fetch the remote table, and use the provided function
-     * pointer to perform the actual table fetching.
-     */
-    FetchTableCommon(regularTableName, generationStamp, nodeNameObject, nodePortObject,
-                     &FetchRegularTable);
-
-    PG_RETURN_VOID();
-}
-
-
-/*
- * worker_fetch_foreign_file caches the given file-backed foreign table on the
- * local node. The function caches this table by trying the given list of node
- * names and node ports in sequential order. On success, the function returns.
- */
-Datum
-worker_fetch_foreign_file(PG_FUNCTION_ARGS)
-{
-    text *foreignTableName = PG_GETARG_TEXT_P(0);
-    uint64 foreignFileSize = PG_GETARG_INT64(1);
-    ArrayType *nodeNameObject = PG_GETARG_ARRAYTYPE_P(2);
-    ArrayType *nodePortObject = PG_GETARG_ARRAYTYPE_P(3);
-
-    CheckCitusVersion(ERROR);
-
-    /*
-     * Run common logic to fetch the remote table, and use the provided function
-     * pointer to perform the actual table fetching.
-     */
-    FetchTableCommon(foreignTableName, foreignFileSize, nodeNameObject, nodePortObject,
-                     &FetchForeignTable);
-
-    PG_RETURN_VOID();
-}
-
-
-/*
- * FetchTableCommon executes common logic that wraps around the actual data
- * fetching function. This common logic includes ensuring that only one process
- * tries to fetch this table at any given time, and that data fetch operations
- * are retried in case of node failures.
- */
-static void
-FetchTableCommon(text *tableNameText, uint64 remoteTableSize,
-                 ArrayType *nodeNameObject, ArrayType *nodePortObject,
-                 bool (*FetchTableFunction)(const char *, uint32, const char *))
-{
-    uint64 shardId = INVALID_SHARD_ID;
-    Oid relationId = InvalidOid;
-    List *relationNameList = NIL;
-    RangeVar *relation = NULL;
-    uint32 nodeIndex = 0;
-    bool tableFetched = false;
-    char *tableName = text_to_cstring(tableNameText);
-
-    Datum *nodeNameArray = DeconstructArrayObject(nodeNameObject);
-    Datum *nodePortArray = DeconstructArrayObject(nodePortObject);
-    int32 nodeNameCount = ArrayObjectCount(nodeNameObject);
-    int32 nodePortCount = ArrayObjectCount(nodePortObject);
-
-    /* we should have the same number of node names and port numbers */
-    if (nodeNameCount != nodePortCount)
-    {
-        ereport(ERROR, (errmsg("node name array size: %d and node port array size: %d"
-                               " do not match", nodeNameCount, nodePortCount)));
-    }
-
-    /*
-     * We lock on the shardId, but do not unlock. When the function returns, and
-     * the transaction for this function commits, this lock will automatically
-     * be released. This ensures that concurrent caching commands will see the
-     * newly created table when they acquire the lock (in read committed mode).
-     */
-    shardId = ExtractShardId(tableName);
-    LockShardResource(shardId, AccessExclusiveLock);
-
-    relationNameList = textToQualifiedNameList(tableNameText);
-    relation = makeRangeVarFromNameList(relationNameList);
-    relationId = RangeVarGetRelid(relation, NoLock, true);
-
-    /* check if we already fetched the table */
-    if (relationId != InvalidOid)
-    {
-        uint64 localTableSize = 0;
-
-        if (!ExpireCachedShards)
-        {
-            return;
-        }
-
-        /*
-         * Check if the cached shard has the same size on disk as it has as on
-         * the placement (is up to date).
-         *
-         * Note 1: performing updates or deletes on the original shard leads to
-         * inconsistent sizes between different databases in which case the data
-         * would be fetched every time, or worse, the placement would get into
-         * a deadlock when it tries to fetch from itself while holding the lock.
-         * Therefore, this option is disabled by default.
-         *
-         * Note 2: when appending data to a shard, the size on disk only
-         * increases when a new page is added (the next 8kB block).
-         */
-        localTableSize = LocalTableSize(relationId);
-
-        if (remoteTableSize > localTableSize)
-        {
-            /* table is not up to date, drop the table */
-            ObjectAddress tableObject = { InvalidOid, InvalidOid, 0 };
-
-            tableObject.classId = RelationRelationId;
-            tableObject.objectId = relationId;
-            tableObject.objectSubId = 0;
-
-            performDeletion(&tableObject, DROP_RESTRICT, PERFORM_DELETION_INTERNAL);
-        }
-        else
-        {
-            /* table is up to date */
-            return;
-        }
-    }
-
-    /* loop until we fetch the table or try all nodes */
-    while (!tableFetched && (nodeIndex < nodeNameCount))
-    {
-        Datum nodeNameDatum = nodeNameArray[nodeIndex];
-        Datum nodePortDatum = nodePortArray[nodeIndex];
-        char *nodeName = TextDatumGetCString(nodeNameDatum);
-        uint32 nodePort = DatumGetUInt32(nodePortDatum);
-
-        tableFetched = (*FetchTableFunction)(nodeName, nodePort, tableName);
-
-        nodeIndex++;
-    }
-
-    /* error out if we tried all nodes and could not fetch the table */
-    if (!tableFetched)
-    {
-        ereport(ERROR, (errmsg("could not fetch relation: \"%s\"", tableName)));
-    }
-}
-
-
-/* LocalTableSize returns the size on disk of the given table. */
-static uint64
-LocalTableSize(Oid relationId)
-{
-    uint64 tableSize = 0;
-    char relationType = 0;
-    Datum relationIdDatum = ObjectIdGetDatum(relationId);
-
-    relationType = get_rel_relkind(relationId);
-    if (RegularTable(relationId))
-    {
-        Datum tableSizeDatum = DirectFunctionCall1(pg_table_size, relationIdDatum);
-
-        tableSize = DatumGetInt64(tableSizeDatum);
-    }
-    else if (relationType == RELKIND_FOREIGN_TABLE)
-    {
-        bool cstoreTable = CStoreTable(relationId);
-        if (cstoreTable)
-        {
-            /* extract schema name of cstore */
-            Oid cstoreId = get_extension_oid(CSTORE_FDW_NAME, false);
-            Oid cstoreSchemaOid = get_extension_schema(cstoreId);
-            const char *cstoreSchemaName = get_namespace_name(cstoreSchemaOid);
-
-            const int tableSizeArgumentCount = 1;
-
-            Oid tableSizeFunctionOid = FunctionOid(cstoreSchemaName,
-                                                   CSTORE_TABLE_SIZE_FUNCTION_NAME,
-                                                   tableSizeArgumentCount);
-            Datum tableSizeDatum = OidFunctionCall1(tableSizeFunctionOid,
-                                                    relationIdDatum);
-
-            tableSize = DatumGetInt64(tableSizeDatum);
-        }
-        else
-        {
-            char *relationName = get_rel_name(relationId);
-            struct stat fileStat;
-
-            int statOK = 0;
-
-            StringInfo localFilePath = makeStringInfo();
-            appendStringInfo(localFilePath, FOREIGN_CACHED_FILE_PATH, relationName);
-
-            /* extract the file size using stat, analogous to pg_stat_file */
-            statOK = stat(localFilePath->data, &fileStat);
-            if (statOK < 0)
-            {
-                ereport(ERROR, (errcode_for_file_access(),
-                                errmsg("could not stat file \"%s\": %m",
-                                       localFilePath->data)));
-            }
-
-            tableSize = (uint64) fileStat.st_size;
-        }
-    }
-    else
-    {
-        char *relationName = get_rel_name(relationId);
-
-        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                        errmsg("cannot get size for table \"%s\"", relationName),
-                        errdetail("Only regular and foreign tables are supported.")));
-    }
-
-    return tableSize;
-}
-
-
 /* Extracts shard id from the given table name, and returns it. */
 static uint64
 ExtractShardId(const char *tableName)
@@ -797,222 +556,6 @@ ExtractShardId(const char *tableName)
 }
 
 
-/*
- * FetchRegularTable fetches the given table's data using the copy out command.
- * The function then fetches the DDL commands necessary to create this table's
- * replica, and locally applies these DDL commands. Last, the function copies
- * the fetched table data into the created table; and on success, returns true.
- * On failure due to connectivity issues with remote node, the function returns
- * false. On other types of failures, the function errors out.
- */
-static bool
-FetchRegularTable(const char *nodeName, uint32 nodePort, const char *tableName)
-{
-    StringInfo localFilePath = NULL;
-    StringInfo remoteCopyCommand = NULL;
-    List *ddlCommandList = NIL;
-    ListCell *ddlCommandCell = NULL;
-    CopyStmt *localCopyCommand = NULL;
-    RangeVar *localTable = NULL;
-    uint64 shardId = 0;
-    bool received = false;
-    StringInfo queryString = NULL;
-    const char *tableOwner = NULL;
-    Oid tableOwnerId = InvalidOid;
-    Oid savedUserId = InvalidOid;
-    int savedSecurityContext = 0;
-    List *tableNameList = NIL;
-
-    /* copy remote table's data to this node in an idempotent manner */
-    shardId = ExtractShardId(tableName);
-    localFilePath = makeStringInfo();
-    appendStringInfo(localFilePath, "base/%s/%s" UINT64_FORMAT,
-                     PG_JOB_CACHE_DIR, TABLE_FILE_PREFIX, shardId);
-
-    remoteCopyCommand = makeStringInfo();
-    appendStringInfo(remoteCopyCommand, COPY_OUT_COMMAND, tableName);
-
-    received = ReceiveRegularFile(nodeName, nodePort, NULL, remoteCopyCommand,
-                                  localFilePath);
-    if (!received)
-    {
-        return false;
-    }
-
-    /* fetch the ddl commands needed to create the table */
-    tableOwner = RemoteTableOwner(nodeName, nodePort, tableName);
-    if (tableOwner == NULL)
-    {
-        return false;
-    }
-    tableOwnerId = get_role_oid(tableOwner, false);
-
-    /* fetch the ddl commands needed to create the table */
-    ddlCommandList = TableDDLCommandList(nodeName, nodePort, tableName);
-    if (ddlCommandList == NIL)
-    {
-        return false;
-    }
-
-    /*
-     * Apply DDL commands against the database. Note that on failure from here
-     * on, we immediately error out instead of returning false. Have to do
-     * this as the table's owner to ensure the local table is created with
-     * compatible permissions.
-     */
-    GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
-    SetUserIdAndSecContext(tableOwnerId, SECURITY_LOCAL_USERID_CHANGE);
-
-    foreach(ddlCommandCell, ddlCommandList)
-    {
-        StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell);
-        Node *ddlCommandNode = ParseTreeNode(ddlCommand->data);
-
-        CitusProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL,
-                            NULL, None_Receiver, NULL);
-        CommandCounterIncrement();
-    }
-
-    /*
-     * Copy local file into the relation. We call ProcessUtility() instead of
-     * directly calling DoCopy() because some extensions (e.g. cstore_fdw) hook
-     * into process utility to provide their custom COPY behavior.
-     */
-    tableNameList = stringToQualifiedNameList(tableName);
-    localTable = makeRangeVarFromNameList(tableNameList);
-    localCopyCommand = CopyStatement(localTable, localFilePath->data);
-
-    queryString = makeStringInfo();
-    appendStringInfo(queryString, COPY_IN_COMMAND, tableName, localFilePath->data);
-
-    CitusProcessUtility((Node *) localCopyCommand, queryString->data,
-                        PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL);
-
-    /* finally delete the temporary file we created */
-    CitusDeleteFile(localFilePath->data);
-
-    SetUserIdAndSecContext(savedUserId, savedSecurityContext);
-
-    return true;
-}
-
-
-/*
- * FetchForeignTable fetches the foreign file for the given table name from the
- * remote node. The function then fetches the DDL commands needed to create the
- * table, and applies these DDL commands locally to create the foreign table.
- * On success, the function returns true. On failure due to connectivity issues
- * with remote node, the function returns false. On failure due to applying DDL
- * commands against the local database, the function errors out.
- */
-static bool
-FetchForeignTable(const char *nodeName, uint32 nodePort, const char *tableName)
-{
-    const char *nodeUser = NULL;
-    StringInfo localFilePath = NULL;
-    StringInfo remoteFilePath = NULL;
-    StringInfo transmitCommand = NULL;
-    StringInfo alterTableCommand = NULL;
-    bool received = false;
-    List *ddlCommandList = NIL;
-    ListCell *ddlCommandCell = NULL;
-
-    /*
-     * Fetch a foreign file to this node in an idempotent manner. It's OK that
-     * this file name lacks the schema, as the table name will have a shard id
-     * attached to it, which is unique (so conflicts are avoided even if two
-     * tables in different schemas have the same name).
-     */
-    localFilePath = makeStringInfo();
-    appendStringInfo(localFilePath, FOREIGN_CACHED_FILE_PATH, tableName);
-
-    remoteFilePath = ForeignFilePath(nodeName, nodePort, tableName);
-    if (remoteFilePath == NULL)
-    {
-        return false;
-    }
-
-    transmitCommand = makeStringInfo();
-    appendStringInfo(transmitCommand, TRANSMIT_REGULAR_COMMAND, remoteFilePath->data);
-
-    /*
-     * We allow some arbitrary input in the file name and connect to the remote
-     * node as superuser to transmit. Therefore, we only allow calling this
-     * function when already running as superuser.
-     */
-    EnsureSuperUser();
-    nodeUser = CitusExtensionOwnerName();
-
-    received = ReceiveRegularFile(nodeName, nodePort, nodeUser, transmitCommand,
-                                  localFilePath);
-    if (!received)
-    {
-        return false;
-    }
-
-    /* fetch the ddl commands needed to create the table */
-    ddlCommandList = TableDDLCommandList(nodeName, nodePort, tableName);
-    if (ddlCommandList == NIL)
-    {
-        return false;
-    }
-
-    alterTableCommand = makeStringInfo();
-    appendStringInfo(alterTableCommand, SET_FOREIGN_TABLE_FILENAME, tableName,
-                     localFilePath->data);
-
-    ddlCommandList = lappend(ddlCommandList, alterTableCommand);
-
-    /*
-     * Apply DDL commands against the database. Note that on failure here, we
-     * immediately error out instead of returning false.
-     */
-    foreach(ddlCommandCell, ddlCommandList)
-    {
-        StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell);
-        Node *ddlCommandNode = ParseTreeNode(ddlCommand->data);
-
-        CitusProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL,
-                            NULL, None_Receiver, NULL);
-        CommandCounterIncrement();
-    }
-
-    return true;
-}
-
-
-/*
- * RemoteTableOwner takes in the given table name, and fetches the owner of
- * the table. If an error occurs during fetching, return NULL.
- */
-static const char *
-RemoteTableOwner(const char *nodeName, uint32 nodePort, const char *tableName)
-{
-    List *ownerList = NIL;
-    StringInfo queryString = NULL;
-    StringInfo relationOwner;
-    MultiConnection *connection = NULL;
-    uint32 connectionFlag = FORCE_NEW_CONNECTION;
-    PGresult *result = NULL;
-
-    queryString = makeStringInfo();
-    appendStringInfo(queryString, GET_TABLE_OWNER, tableName);
-    connection = GetNodeConnection(connectionFlag, nodeName, nodePort);
-
-    ExecuteOptionalRemoteCommand(connection, queryString->data, &result);
-
-    ownerList = ReadFirstColumnAsText(result);
-    if (list_length(ownerList) != 1)
-    {
-        return NULL;
-    }
-
-    relationOwner = (StringInfo) linitial(ownerList);
-
-    return relationOwner->data;
-}
-
-
 /*
  * TableDDLCommandList takes in the given table name, and fetches the list of
  * DDL commands used in creating the table. If an error occurs during fetching,
@@ -1041,37 +584,6 @@ TableDDLCommandList(const char *nodeName, uint32 nodePort, const char *tableName
 }
 
 
-/*
- * ForeignFilePath takes in the foreign table name, and fetches this table's
- * remote file path. If an error occurs during fetching, the function returns
- * null.
- */
-static StringInfo
-ForeignFilePath(const char *nodeName, uint32 nodePort, const char *tableName)
-{
-    List *foreignPathList = NIL;
-    StringInfo foreignPathCommand = NULL;
-    StringInfo foreignPath = NULL;
-    MultiConnection *connection = NULL;
-    PGresult *result = NULL;
-    int connectionFlag = FORCE_NEW_CONNECTION;
-
-    foreignPathCommand = makeStringInfo();
-    appendStringInfo(foreignPathCommand, FOREIGN_FILE_PATH_COMMAND, tableName);
-    connection = GetNodeConnection(connectionFlag, nodeName, nodePort);
-
-    ExecuteOptionalRemoteCommand(connection, foreignPathCommand->data, &result);
-
-    foreignPathList = ReadFirstColumnAsText(result);
-    if (foreignPathList != NIL)
-    {
-        foreignPath = (StringInfo) linitial(foreignPathList);
-    }
-
-    return foreignPath;
-}
-
-
 /*
  * ExecuteRemoteQuery executes the given query, copies the query's results to a
  * sorted list, and returns this list. The function assumes that query results
@@ -1427,3 +939,39 @@ SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg)
 
     statement->options = lappend(statement->options, defElem);
 }
+
+
+/*
+ * worker_fetch_regular_table UDF is a stub UDF to install Citus flawlessly.
+ * Otherwise we need to delete them from our sql files, which is confusing
+ */
+Datum
+worker_fetch_regular_table(PG_FUNCTION_ARGS)
+{
+    ereport(DEBUG2, (errmsg("this function is deprecated and no longer is used")));
+    PG_RETURN_VOID();
+}
+
+
+/*
+ * worker_fetch_foreign_file UDF is a stub UDF to install Citus flawlessly.
+ * Otherwise we need to delete them from our sql files, which is confusing
+ */
+Datum
+worker_fetch_foreign_file(PG_FUNCTION_ARGS)
+{
+    ereport(DEBUG2, (errmsg("this function is deprecated and no longer is used")));
+    PG_RETURN_VOID();
+}
+
+
+/*
+ * master_expire_table_cache UDF is a stub UDF to install Citus flawlessly.
+ * Otherwise we need to delete them from our sql files, which is confusing
+ */
+Datum
+master_expire_table_cache(PG_FUNCTION_ARGS)
+{
+    ereport(DEBUG2, (errmsg("this function is deprecated and no longer is used")));
+    PG_RETURN_VOID();
+}
@@ -33,10 +33,6 @@
 #define NON_PRUNABLE_JOIN -1
 #define RESERVED_HASHED_COLUMN_ID MaxAttrNumber
 #define MERGE_COLUMN_FORMAT "merge_column_%u"
-#define TABLE_FETCH_COMMAND "SELECT worker_fetch_regular_table \
-('%s', " UINT64_FORMAT ", '%s', '%s')"
-#define FOREIGN_FETCH_COMMAND "SELECT worker_fetch_foreign_file \
-('%s', " UINT64_FORMAT ", '%s', '%s')"
 #define MAP_OUTPUT_FETCH_COMMAND "SELECT worker_fetch_partition_file \
 (" UINT64_FORMAT ", %u, %u, %u, '%s', %u)"
 #define RANGE_PARTITION_COMMAND "SELECT worker_range_partition_table \
@@ -84,12 +80,11 @@ typedef enum
	SQL_TASK = 1,
	MAP_TASK = 2,
	MERGE_TASK = 3,
	SHARD_FETCH_TASK = 4,
	MAP_OUTPUT_FETCH_TASK = 5,
	MERGE_FETCH_TASK = 6,
	MODIFY_TASK = 7,
	ROUTER_TASK = 8,
	DDL_TASK = 9
	MAP_OUTPUT_FETCH_TASK = 4,
	MERGE_FETCH_TASK = 5,
	MODIFY_TASK = 6,
	ROUTER_TASK = 7,
	DDL_TASK = 8
} TaskType;
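
With SHARD_FETCH_TASK removed, the remaining members shift down by one, and any code that switches over TaskType simply loses its shard-fetch arm. A hypothetical dispatcher, for illustration only (not part of this patch):

/* hypothetical helper mapping each remaining task type to a display name */
static const char *
TaskTypeName(TaskType taskType)
{
	switch (taskType)
	{
		case SQL_TASK: return "SQL task";
		case MAP_TASK: return "map task";
		case MERGE_TASK: return "merge task";
		case MAP_OUTPUT_FETCH_TASK: return "map output fetch task";
		case MERGE_FETCH_TASK: return "merge fetch task";
		case MODIFY_TASK: return "modify task";
		case ROUTER_TASK: return "router task";
		case DDL_TASK: return "DDL task";
		default: return "unknown task";
	}
}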
@@ -151,7 +146,7 @@ typedef struct MapMergeJob
/*
 * Task represents an executable unit of work. We conceptualize our tasks into
 * compute and data fetch task types. SQL, map, and merge tasks are considered
 * as compute tasks; and shard fetch, map fetch, and merge fetch tasks are data
 * as compute tasks; and map fetch and merge fetch tasks are data
 * fetch tasks. We also forward declare the task execution struct here to avoid
 * including the executor header files.
 *

@@ -180,7 +175,6 @@ typedef struct Task
	uint32 upstreamTaskId;         /* only applies to data fetch tasks */
	ShardInterval *shardInterval;  /* only applies to merge tasks */
	bool assignmentConstrained;    /* only applies to merge tasks */
	uint64 shardId;                /* only applies to shard fetch tasks */
	TaskExecution *taskExecution;  /* used by task tracker executor */
	bool upsertQuery;              /* only applies to modify tasks */
	char replicationModel;         /* only applies to modify tasks */

@@ -303,7 +297,6 @@ extern bool EnableUniqueJobIds;
extern DistributedPlan * CreatePhysicalDistributedPlan(MultiTreeRoot *multiTree,
													   PlannerRestrictionContext *
													   plannerRestrictionContext);
extern StringInfo ShardFetchQueryString(uint64 shardId);
extern Task * CreateBasicTask(uint64 jobId, uint32 taskId, TaskType taskType,
							  char *queryString);

@@ -331,7 +324,6 @@ extern List * TaskListAppendUnique(List *list, Task *task);
extern List * TaskListConcatUnique(List *list1, List *list2);
extern bool TaskListMember(const List *taskList, const Task *task);
extern List * TaskListDifference(const List *list1, const List *list2);
extern List * TaskListUnion(const List *list1, const List *list2);
extern List * AssignAnchorShardTaskList(List *taskList);
extern List * FirstReplicaAssignTaskList(List *taskList);

@@ -44,13 +44,10 @@ typedef enum
	EXEC_TASK_CONNECT_START = 1,
	EXEC_TASK_CONNECT_POLL = 2,
	EXEC_TASK_FAILED = 3,
	EXEC_FETCH_TASK_LOOP = 4,
	EXEC_FETCH_TASK_START = 5,
	EXEC_FETCH_TASK_RUNNING = 6,
	EXEC_COMPUTE_TASK_START = 7,
	EXEC_COMPUTE_TASK_RUNNING = 8,
	EXEC_COMPUTE_TASK_COPYING = 9,
	EXEC_TASK_DONE = 10,
	EXEC_COMPUTE_TASK_START = 4,
	EXEC_COMPUTE_TASK_RUNNING = 5,
	EXEC_COMPUTE_TASK_COPYING = 6,
	EXEC_TASK_DONE = 7,

	/* used for task tracker executor */
	EXEC_TASK_UNASSIGNED = 11,

@@ -143,7 +140,6 @@ struct TaskExecution
	uint32 nodeCount;
	uint32 currentNodeIndex;
	uint32 querySourceNodeIndex; /* only applies to map fetch tasks */
	int32 dataFetchTaskIndex;
	uint32 failureCount;
	bool criticalErrorOccurred;
};

@@ -47,14 +47,8 @@
#define COPY_OUT_COMMAND "COPY %s TO STDOUT"
#define COPY_IN_COMMAND "COPY %s FROM '%s'"

/* Defines that relate to fetching foreign tables */
#define FOREIGN_CACHED_FILE_PATH "pg_foreign_file/cached/%s"
#define GET_TABLE_OWNER \
	"SELECT rolname FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) " \
	"WHERE pg_class.oid = '%s'::regclass"
/* Defines that relate to creating tables */
#define GET_TABLE_DDL_EVENTS "SELECT master_get_table_ddl_events('%s')"
#define SET_FOREIGN_TABLE_FILENAME "ALTER FOREIGN TABLE %s OPTIONS (SET filename '%s')"
#define FOREIGN_FILE_PATH_COMMAND "SELECT worker_foreign_file_path('%s')"
#define SET_SEARCH_PATH_COMMAND "SET search_path TO %s"
#define CREATE_TABLE_COMMAND "CREATE TABLE %s (%s)"
#define CREATE_TABLE_AS_COMMAND "CREATE TABLE %s (%s) AS (%s)"
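
Each of these templates is expanded with appendStringInfo before being sent to a worker, just as ForeignFilePath does above with FOREIGN_FILE_PATH_COMMAND. A minimal sketch for GET_TABLE_DDL_EVENTS (the helper name is hypothetical, for illustration only):

/* hypothetical helper: expands the GET_TABLE_DDL_EVENTS template */
static StringInfo
TableDDLEventsCommand(const char *qualifiedTableName)
{
	StringInfo ddlCommand = makeStringInfo();

	/* expands to: SELECT master_get_table_ddl_events('<table name>') */
	appendStringInfo(ddlCommand, GET_TABLE_DDL_EVENTS, qualifiedTableName);

	return ddlCommand;
}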
@@ -99,7 +93,6 @@ typedef struct FileOutputStream

/* Config variables managed via guc.c */
extern int PartitionBufferSize;
extern bool ExpireCachedShards;
extern bool BinaryWorkerCopyFormat;

@@ -1,108 +0,0 @@
---
--- MULTI_EXPIRE_TABLE_CACHE
---
SET citus.next_shard_id TO 1220000;
-- create test table
CREATE TABLE large_table(a int, b int);
SELECT master_create_distributed_table('large_table', 'a', 'hash');
 master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('large_table', 8, 1);
 master_create_worker_shards
-----------------------------

(1 row)

CREATE TABLE broadcast_table(a int, b int);
SELECT master_create_distributed_table('broadcast_table', 'a', 'hash');
 master_create_distributed_table
---------------------------------

(1 row)

SELECT master_create_worker_shards('broadcast_table', 2, 1);
 master_create_worker_shards
-----------------------------

(1 row)

-- verify only small tables are supported
SELECT master_expire_table_cache('large_table');
ERROR: Must be called on tables smaller than 4 shards
SELECT master_expire_table_cache('broadcast_table');
 master_expire_table_cache
---------------------------

(1 row)

-- run a join so that broadcast tables are cached on other workers
SELECT * from large_table l, broadcast_table b where l.a = b.b;
 a | b | a | b
---+---+---+---
(0 rows)

-- insert some data
INSERT INTO large_table VALUES(1, 1);
INSERT INTO large_table VALUES(1, 2);
INSERT INTO large_table VALUES(2, 1);
INSERT INTO large_table VALUES(2, 2);
INSERT INTO large_table VALUES(3, 1);
INSERT INTO large_table VALUES(3, 2);
INSERT INTO broadcast_table VALUES(1, 1);
-- verify returned results are wrong
SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;
 a | b | a | b
---+---+---+---
 1 | 1 | 1 | 1
 2 | 1 | 1 | 1
(2 rows)

-- expire cache and re-run, results should be correct this time
SELECT master_expire_table_cache('broadcast_table');
 master_expire_table_cache
---------------------------

(1 row)

SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;
 a | b | a | b
---+---+---+---
 1 | 1 | 1 | 1
 2 | 1 | 1 | 1
 3 | 1 | 1 | 1
(3 rows)

-- insert some more data into broadcast table
INSERT INTO broadcast_table VALUES(2, 2);
-- run the same query, get wrong results
SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;
 a | b | a | b
---+---+---+---
 1 | 1 | 1 | 1
 2 | 1 | 1 | 1
 3 | 1 | 1 | 1
 3 | 2 | 2 | 2
(4 rows)

-- expire cache and re-run, results should be correct this time
SELECT master_expire_table_cache('broadcast_table');
 master_expire_table_cache
---------------------------

(1 row)

SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;
 a | b | a | b
---+---+---+---
 1 | 1 | 1 | 1
 1 | 2 | 2 | 2
 2 | 1 | 1 | 1
 2 | 2 | 2 | 2
 3 | 1 | 1 | 1
 3 | 2 | 2 | 2
(6 rows)

DROP TABLE large_table, broadcast_table;

@@ -133,6 +133,7 @@ ALTER EXTENSION citus UPDATE TO '7.2-1';
ALTER EXTENSION citus UPDATE TO '7.2-2';
ALTER EXTENSION citus UPDATE TO '7.2-3';
ALTER EXTENSION citus UPDATE TO '7.3-3';
ALTER EXTENSION citus UPDATE TO '7.4-1';
-- show running version
SHOW citus.version;
 citus.version

@@ -49,63 +49,63 @@ DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
DEBUG: generated sql query for task 3
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 6
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 9
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 12
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 15
DEBUG: generated sql query for task 5
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 18
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 21
DEBUG: generated sql query for task 7
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 24
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 15 to node localhost:57638
DEBUG: assigned task 24 to node localhost:57637
DEBUG: assigned task 21 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000025".intermediate_column_1_0, "pg_merge_job_0001.task_000025".intermediate_column_1_1, "pg_merge_job_0001.task_000025".intermediate_column_1_2, "pg_merge_job_0001.task_000025".intermediate_column_1_3, "pg_merge_job_0001.task_000025".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000025 "pg_merge_job_0001.task_000025" JOIN part_290011 part ON (("pg_merge_job_0001.task_000025".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000034".intermediate_column_1_0, "pg_merge_job_0001.task_000034".intermediate_column_1_1, "pg_merge_job_0001.task_000034".intermediate_column_1_2, "pg_merge_job_0001.task_000034".intermediate_column_1_3, "pg_merge_job_0001.task_000034".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000034 "pg_merge_job_0001.task_000034" JOIN part_280002 part ON (("pg_merge_job_0001.task_000034".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000009".intermediate_column_1_0, "pg_merge_job_0001.task_000009".intermediate_column_1_1, "pg_merge_job_0001.task_000009".intermediate_column_1_2, "pg_merge_job_0001.task_000009".intermediate_column_1_3, "pg_merge_job_0001.task_000009".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000009 "pg_merge_job_0001.task_000009" JOIN part_290011 part ON (("pg_merge_job_0001.task_000009".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000018".intermediate_column_1_0, "pg_merge_job_0001.task_000018".intermediate_column_1_1, "pg_merge_job_0001.task_000018".intermediate_column_1_2, "pg_merge_job_0001.task_000018".intermediate_column_1_3, "pg_merge_job_0001.task_000018".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000018 "pg_merge_job_0001.task_000018" JOIN part_280002 part ON (("pg_merge_job_0001.task_000018".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 34
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 18
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000007".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000007".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000007 "pg_merge_job_0002.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000007".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000007".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000007".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000007".intermediate_column_2_0, "pg_merge_job_0002.task_000007".intermediate_column_2_1"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000010".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000010".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000010 "pg_merge_job_0002.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000010".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000010".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000010".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000010".intermediate_column_2_0, "pg_merge_job_0002.task_000010".intermediate_column_2_1"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000013".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000013".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000013 "pg_merge_job_0002.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000013".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000013".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000013".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000013".intermediate_column_2_0, "pg_merge_job_0002.task_000013".intermediate_column_2_1"
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000011".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000011".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000011 "pg_merge_job_0002.task_000011" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000011".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000011".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000011".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000011".intermediate_column_2_0, "pg_merge_job_0002.task_000011".intermediate_column_2_1"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 13
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 2

@@ -156,36 +156,36 @@ GROUP BY
	l_partkey, o_orderkey
ORDER BY
	l_partkey, o_orderkey;
DEBUG: generated sql query for task 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 10
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 14
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 16
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638
DEBUG: assigned task 16 to node localhost:57637
DEBUG: assigned task 14 to node localhost:57638
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 5
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 7
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3

@@ -199,29 +199,29 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000017".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000005".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000017 "pg_merge_job_0004.task_000017" JOIN pg_merge_job_0005.task_000005 "pg_merge_job_0005.task_000005" ON (("pg_merge_job_0004.task_000017".intermediate_column_4_1 = "pg_merge_job_0005.task_000005".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000017".intermediate_column_4_0, "pg_merge_job_0005.task_000005".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 = "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000026".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000008".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000026 "pg_merge_job_0004.task_000026" JOIN pg_merge_job_0005.task_000008 "pg_merge_job_0005.task_000008" ON (("pg_merge_job_0004.task_000026".intermediate_column_4_1 = "pg_merge_job_0005.task_000008".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000026".intermediate_column_4_0, "pg_merge_job_0005.task_000008".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000018".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000018 "pg_merge_job_0004.task_000018" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000018".intermediate_column_4_1 = "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000018".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000035".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000011".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000035 "pg_merge_job_0004.task_000035" JOIN pg_merge_job_0005.task_000011 "pg_merge_job_0005.task_000011" ON (("pg_merge_job_0004.task_000035".intermediate_column_4_1 = "pg_merge_job_0005.task_000011".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000035".intermediate_column_4_0, "pg_merge_job_0005.task_000011".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000027".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000027 "pg_merge_job_0004.task_000027" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000027".intermediate_column_4_1 = "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000027".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000044".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000014".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000044 "pg_merge_job_0004.task_000044" JOIN pg_merge_job_0005.task_000014 "pg_merge_job_0005.task_000014" ON (("pg_merge_job_0004.task_000044".intermediate_column_4_1 = "pg_merge_job_0005.task_000014".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000044".intermediate_column_4_0, "pg_merge_job_0005.task_000014".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000036".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000036 "pg_merge_job_0004.task_000036" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000036".intermediate_column_4_1 = "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000036".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 17
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 35
DETAIL: Creating dependency on merge taskId 27
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44
DETAIL: Creating dependency on merge taskId 36
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637

@@ -57,63 +57,63 @@ DEBUG: join prunable for intervals [8997,10560] and [1,5986]
DEBUG: join prunable for intervals [10560,12036] and [1,5986]
DEBUG: join prunable for intervals [12036,13473] and [1,5986]
DEBUG: join prunable for intervals [13473,14947] and [1,5986]
DEBUG: generated sql query for task 3
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 6
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 9
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 12
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 15
DEBUG: generated sql query for task 5
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 18
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 21
DEBUG: generated sql query for task 7
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: generated sql query for task 24
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 15 to node localhost:57638
DEBUG: assigned task 24 to node localhost:57637
DEBUG: assigned task 21 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000025".intermediate_column_1_0, "pg_merge_job_0001.task_000025".intermediate_column_1_1, "pg_merge_job_0001.task_000025".intermediate_column_1_2, "pg_merge_job_0001.task_000025".intermediate_column_1_3, "pg_merge_job_0001.task_000025".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000025 "pg_merge_job_0001.task_000025" JOIN part_290011 part ON (("pg_merge_job_0001.task_000025".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000034".intermediate_column_1_0, "pg_merge_job_0001.task_000034".intermediate_column_1_1, "pg_merge_job_0001.task_000034".intermediate_column_1_2, "pg_merge_job_0001.task_000034".intermediate_column_1_3, "pg_merge_job_0001.task_000034".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000034 "pg_merge_job_0001.task_000034" JOIN part_280002 part ON (("pg_merge_job_0001.task_000034".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000009".intermediate_column_1_0, "pg_merge_job_0001.task_000009".intermediate_column_1_1, "pg_merge_job_0001.task_000009".intermediate_column_1_2, "pg_merge_job_0001.task_000009".intermediate_column_1_3, "pg_merge_job_0001.task_000009".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000009 "pg_merge_job_0001.task_000009" JOIN part_290011 part ON (("pg_merge_job_0001.task_000009".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0001.task_000018".intermediate_column_1_0, "pg_merge_job_0001.task_000018".intermediate_column_1_1, "pg_merge_job_0001.task_000018".intermediate_column_1_2, "pg_merge_job_0001.task_000018".intermediate_column_1_3, "pg_merge_job_0001.task_000018".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000018 "pg_merge_job_0001.task_000018" JOIN part_280002 part ON (("pg_merge_job_0001.task_000018".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 25
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 34
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 18
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000007".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000007".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000007 "pg_merge_job_0002.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000007".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000007".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000007".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000007".intermediate_column_2_0, "pg_merge_job_0002.task_000007".intermediate_column_2_1"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000005".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000005".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000005 "pg_merge_job_0002.task_000005" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000005".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000005".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000005".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000005".intermediate_column_2_0, "pg_merge_job_0002.task_000005".intermediate_column_2_1"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000008".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000008".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000008 "pg_merge_job_0002.task_000008" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000008".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000008".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000008".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000008".intermediate_column_2_0, "pg_merge_job_0002.task_000008".intermediate_column_2_1"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000010".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000010".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000010 "pg_merge_job_0002.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000010".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000010".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000010".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000010".intermediate_column_2_0, "pg_merge_job_0002.task_000010".intermediate_column_2_1"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000013".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000013".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000013 "pg_merge_job_0002.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000013".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000013".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000013".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000013".intermediate_column_2_0, "pg_merge_job_0002.task_000013".intermediate_column_2_1"
DETAIL: query string: "SELECT "pg_merge_job_0002.task_000011".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000011".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000011 "pg_merge_job_0002.task_000011" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000011".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000011".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000011".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000011".intermediate_column_2_0, "pg_merge_job_0002.task_000011".intermediate_column_2_1"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 13
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 3
DEBUG: completed cleanup query for job 2

@@ -166,36 +166,36 @@ GROUP BY
ORDER BY
	l_partkey, o_orderkey;
DEBUG: StartTransactionCommand
DEBUG: generated sql query for task 2
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 10
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 14
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 16
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638
DEBUG: assigned task 16 to node localhost:57637
DEBUG: assigned task 14 to node localhost:57638
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 4
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 5
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 7
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
DEBUG: generated sql query for task 8
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: generated sql query for task 1
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
DEBUG: generated sql query for task 2
DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3

@@ -209,29 +209,29 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: generated sql query for task 3
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000017".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000005".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000017 "pg_merge_job_0004.task_000017" JOIN pg_merge_job_0005.task_000005 "pg_merge_job_0005.task_000005" ON (("pg_merge_job_0004.task_000017".intermediate_column_4_1 = "pg_merge_job_0005.task_000005".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000017".intermediate_column_4_0, "pg_merge_job_0005.task_000005".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000009".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000003".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000009 "pg_merge_job_0004.task_000009" JOIN pg_merge_job_0005.task_000003 "pg_merge_job_0005.task_000003" ON (("pg_merge_job_0004.task_000009".intermediate_column_4_1 = "pg_merge_job_0005.task_000003".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000009".intermediate_column_4_0, "pg_merge_job_0005.task_000003".intermediate_column_5_0"
DEBUG: generated sql query for task 6
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000026".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000008".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000026 "pg_merge_job_0004.task_000026" JOIN pg_merge_job_0005.task_000008 "pg_merge_job_0005.task_000008" ON (("pg_merge_job_0004.task_000026".intermediate_column_4_1 = "pg_merge_job_0005.task_000008".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000026".intermediate_column_4_0, "pg_merge_job_0005.task_000008".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000018".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000006".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000018 "pg_merge_job_0004.task_000018" JOIN pg_merge_job_0005.task_000006 "pg_merge_job_0005.task_000006" ON (("pg_merge_job_0004.task_000018".intermediate_column_4_1 = "pg_merge_job_0005.task_000006".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000018".intermediate_column_4_0, "pg_merge_job_0005.task_000006".intermediate_column_5_0"
DEBUG: generated sql query for task 9
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000035".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000011".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000035 "pg_merge_job_0004.task_000035" JOIN pg_merge_job_0005.task_000011 "pg_merge_job_0005.task_000011" ON (("pg_merge_job_0004.task_000035".intermediate_column_4_1 = "pg_merge_job_0005.task_000011".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000035".intermediate_column_4_0, "pg_merge_job_0005.task_000011".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000027".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000009".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000027 "pg_merge_job_0004.task_000027" JOIN pg_merge_job_0005.task_000009 "pg_merge_job_0005.task_000009" ON (("pg_merge_job_0004.task_000027".intermediate_column_4_1 = "pg_merge_job_0005.task_000009".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000027".intermediate_column_4_0, "pg_merge_job_0005.task_000009".intermediate_column_5_0"
DEBUG: generated sql query for task 12
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000044".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000014".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000044 "pg_merge_job_0004.task_000044" JOIN pg_merge_job_0005.task_000014 "pg_merge_job_0005.task_000014" ON (("pg_merge_job_0004.task_000044".intermediate_column_4_1 = "pg_merge_job_0005.task_000014".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000044".intermediate_column_4_0, "pg_merge_job_0005.task_000014".intermediate_column_5_0"
DETAIL: query string: "SELECT "pg_merge_job_0004.task_000036".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000012".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000036 "pg_merge_job_0004.task_000036" JOIN pg_merge_job_0005.task_000012 "pg_merge_job_0005.task_000012" ON (("pg_merge_job_0004.task_000036".intermediate_column_4_1 = "pg_merge_job_0005.task_000012".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000036".intermediate_column_4_0, "pg_merge_job_0005.task_000012".intermediate_column_5_0"
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 17
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 35
DETAIL: Creating dependency on merge taskId 27
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44
DETAIL: Creating dependency on merge taskId 36
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 12
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 9 to node localhost:57637

@@ -23,11 +23,11 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 9
                             QUERY PLAN
-------------------------------------------------------------------
 Aggregate

@@ -52,11 +52,11 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 9
 count
-------
  2984

@ -151,21 +151,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 17
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 35
DETAIL: Creating dependency on merge taskId 27
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44
DETAIL: Creating dependency on merge taskId 36
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 16
QUERY PLAN
-------------------------------------------------------------------
Aggregate
@ -199,21 +199,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 17
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 35
DETAIL: Creating dependency on merge taskId 27
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44
DETAIL: Creating dependency on merge taskId 36
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 16
count
-------
125

@ -26,8 +26,8 @@ FROM
orders, customer
WHERE
o_custkey = c_custkey;
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
@ -35,14 +35,14 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 11
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 9
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
count
-------
2984
@ -60,12 +60,12 @@ FROM
WHERE
o_custkey = c_custkey AND
o_orderkey = l_orderkey;
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 15 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 18 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
@ -111,65 +111,65 @@ DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 9
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 15
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 22
DETAIL: Creating dependency on merge taskId 33
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 21
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 23
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 25
DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 28
DETAIL: Creating dependency on merge taskId 40
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 27
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 29
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 31
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 34
DETAIL: Creating dependency on merge taskId 47
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 33
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 35
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 37
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 40
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 43
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 46
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 49
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 52
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 55
DETAIL: Creating dependency on merge taskId 68
DEBUG: pruning merge fetch taskId 58
DETAIL: Creating dependency on merge taskId 68
DEBUG: assigned task 21 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 27 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 48 to node localhost:57637
DEBUG: assigned task 33 to node localhost:57638
DEBUG: assigned task 39 to node localhost:57637
DEBUG: assigned task 57 to node localhost:57638
DEBUG: propagating assignment from merge task 19 to constrained sql task 6
DEBUG: propagating assignment from merge task 26 to constrained sql task 12
DEBUG: propagating assignment from merge task 26 to constrained sql task 15
DEBUG: propagating assignment from merge task 26 to constrained sql task 18
DEBUG: propagating assignment from merge task 33 to constrained sql task 24
DEBUG: propagating assignment from merge task 40 to constrained sql task 30
DEBUG: propagating assignment from merge task 47 to constrained sql task 36
DEBUG: propagating assignment from merge task 54 to constrained sql task 42
DEBUG: propagating assignment from merge task 54 to constrained sql task 45
DEBUG: propagating assignment from merge task 61 to constrained sql task 51
DEBUG: propagating assignment from merge task 61 to constrained sql task 54
DEBUG: propagating assignment from merge task 68 to constrained sql task 60
DETAIL: Creating dependency on merge taskId 56
DEBUG: pruning merge fetch taskId 39
DETAIL: Creating dependency on merge taskId 56
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 32 to node localhost:57637
DEBUG: assigned task 22 to node localhost:57638
DEBUG: assigned task 26 to node localhost:57637
DEBUG: assigned task 38 to node localhost:57638
DEBUG: propagating assignment from merge task 7 to constrained sql task 4
DEBUG: propagating assignment from merge task 14 to constrained sql task 8
DEBUG: propagating assignment from merge task 14 to constrained sql task 10
DEBUG: propagating assignment from merge task 14 to constrained sql task 12
DEBUG: propagating assignment from merge task 21 to constrained sql task 16
DEBUG: propagating assignment from merge task 28 to constrained sql task 20
DEBUG: propagating assignment from merge task 35 to constrained sql task 24
DEBUG: propagating assignment from merge task 42 to constrained sql task 28
DEBUG: propagating assignment from merge task 42 to constrained sql task 30
DEBUG: propagating assignment from merge task 49 to constrained sql task 34
DEBUG: propagating assignment from merge task 49 to constrained sql task 36
DEBUG: propagating assignment from merge task 56 to constrained sql task 40
count
-------
11998
@ -184,17 +184,17 @@ FROM
lineitem, customer
WHERE
l_partkey = c_nationkey;
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638
DEBUG: assigned task 16 to node localhost:57637
DEBUG: assigned task 14 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@ -208,21 +208,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 17
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 35
DETAIL: Creating dependency on merge taskId 27
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44
DETAIL: Creating dependency on merge taskId 36
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 16
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638

@ -34,8 +34,8 @@ FROM
WHERE
o_custkey = c_custkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: join prunable for intervals [1,1000] and [1001,2000]
DEBUG: join prunable for intervals [1,1000] and [6001,7000]
DEBUG: join prunable for intervals [1001,2000] and [1,1000]
@ -43,14 +43,14 @@ DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
DEBUG: join prunable for intervals [6001,7000] and [1,1000]
DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 11
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 9
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
count
-------
@ -73,12 +73,12 @@ WHERE
o_custkey = c_custkey AND
o_orderkey = l_orderkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 9 to node localhost:57637
DEBUG: assigned task 15 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 18 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: join prunable for intervals [1,1509] and [2951,4455]
DEBUG: join prunable for intervals [1,1509] and [4480,5986]
DEBUG: join prunable for intervals [1,1509] and [8997,10560]
@ -124,65 +124,65 @@ DEBUG: join prunable for intervals [13473,14947] and [4480,5986]
DEBUG: join prunable for intervals [13473,14947] and [8997,10560]
DEBUG: join prunable for intervals [13473,14947] and [10560,12036]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 19
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 9
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 26
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 15
DETAIL: Creating dependency on merge taskId 21
DEBUG: pruning merge fetch taskId 17
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 33
DEBUG: pruning merge fetch taskId 22
DETAIL: Creating dependency on merge taskId 33
DETAIL: Creating dependency on merge taskId 28
DEBUG: pruning merge fetch taskId 21
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 23
DETAIL: Creating dependency on merge taskId 35
DEBUG: pruning merge fetch taskId 25
DETAIL: Creating dependency on merge taskId 40
DEBUG: pruning merge fetch taskId 28
DETAIL: Creating dependency on merge taskId 40
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 27
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 29
DETAIL: Creating dependency on merge taskId 42
DEBUG: pruning merge fetch taskId 31
DETAIL: Creating dependency on merge taskId 47
DEBUG: pruning merge fetch taskId 34
DETAIL: Creating dependency on merge taskId 47
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 33
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 35
DETAIL: Creating dependency on merge taskId 49
DEBUG: pruning merge fetch taskId 37
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 40
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 43
DETAIL: Creating dependency on merge taskId 54
DEBUG: pruning merge fetch taskId 46
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 49
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 52
DETAIL: Creating dependency on merge taskId 61
DEBUG: pruning merge fetch taskId 55
DETAIL: Creating dependency on merge taskId 68
DEBUG: pruning merge fetch taskId 58
DETAIL: Creating dependency on merge taskId 68
DEBUG: assigned task 21 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 27 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638
DEBUG: assigned task 48 to node localhost:57637
DEBUG: assigned task 33 to node localhost:57638
DEBUG: assigned task 39 to node localhost:57637
DEBUG: assigned task 57 to node localhost:57638
DEBUG: propagating assignment from merge task 19 to constrained sql task 6
DEBUG: propagating assignment from merge task 26 to constrained sql task 12
DEBUG: propagating assignment from merge task 26 to constrained sql task 15
DEBUG: propagating assignment from merge task 26 to constrained sql task 18
DEBUG: propagating assignment from merge task 33 to constrained sql task 24
DEBUG: propagating assignment from merge task 40 to constrained sql task 30
DEBUG: propagating assignment from merge task 47 to constrained sql task 36
DEBUG: propagating assignment from merge task 54 to constrained sql task 42
DEBUG: propagating assignment from merge task 54 to constrained sql task 45
DEBUG: propagating assignment from merge task 61 to constrained sql task 51
DEBUG: propagating assignment from merge task 61 to constrained sql task 54
DEBUG: propagating assignment from merge task 68 to constrained sql task 60
DETAIL: Creating dependency on merge taskId 56
DEBUG: pruning merge fetch taskId 39
DETAIL: Creating dependency on merge taskId 56
DEBUG: assigned task 14 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 18 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 32 to node localhost:57637
DEBUG: assigned task 22 to node localhost:57638
DEBUG: assigned task 26 to node localhost:57637
DEBUG: assigned task 38 to node localhost:57638
DEBUG: propagating assignment from merge task 7 to constrained sql task 4
DEBUG: propagating assignment from merge task 14 to constrained sql task 8
DEBUG: propagating assignment from merge task 14 to constrained sql task 10
DEBUG: propagating assignment from merge task 14 to constrained sql task 12
DEBUG: propagating assignment from merge task 21 to constrained sql task 16
DEBUG: propagating assignment from merge task 28 to constrained sql task 20
DEBUG: propagating assignment from merge task 35 to constrained sql task 24
DEBUG: propagating assignment from merge task 42 to constrained sql task 28
DEBUG: propagating assignment from merge task 42 to constrained sql task 30
DEBUG: propagating assignment from merge task 49 to constrained sql task 34
DEBUG: propagating assignment from merge task 49 to constrained sql task 36
DEBUG: propagating assignment from merge task 56 to constrained sql task 40
DEBUG: CommitTransactionCommand
count
-------
@ -202,17 +202,17 @@ FROM
WHERE
l_partkey = c_nationkey;
DEBUG: StartTransactionCommand
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 12 to node localhost:57637
DEBUG: assigned task 10 to node localhost:57638
DEBUG: assigned task 16 to node localhost:57637
DEBUG: assigned task 14 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 5 to node localhost:57638
DEBUG: assigned task 8 to node localhost:57637
DEBUG: assigned task 7 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
@ -226,21 +226,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 17
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 7
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 26
DETAIL: Creating dependency on merge taskId 18
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 11
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 35
DETAIL: Creating dependency on merge taskId 27
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 44
DETAIL: Creating dependency on merge taskId 36
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 16
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 9 to node localhost:57638

@ -136,7 +136,7 @@ BEGIN;
INSERT INTO researchers VALUES (8, 5, 'Douglas Engelbart');
INSERT INTO labs VALUES (5, 'Los Alamos');
COMMIT;
SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id;
SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id AND researchers.lab_id = 5;
id | lab_id | name | id | name
----+--------+-------------------+----+------------
8 | 5 | Douglas Engelbart | 5 | Los Alamos

@ -6,7 +6,6 @@
SET citus.next_shard_id TO 1420000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000;
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 2;
CREATE TABLE test (id integer, val integer);
SELECT create_distributed_table('test', 'id');
create_distributed_table
@ -40,6 +39,8 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT ALL ON TABLE test_1420000 TO full_access;
GRANT SELECT ON TABLE test_1420000 TO read_access;
GRANT ALL ON TABLE test_1420002 TO full_access;
GRANT SELECT ON TABLE test_1420002 TO read_access;
\c - - - :worker_2_port
CREATE USER full_access;
NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
@ -52,6 +53,8 @@ NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
HINT: Connect to worker nodes directly to manually create all necessary users and roles.
GRANT ALL ON TABLE test_1420001 TO full_access;
GRANT SELECT ON TABLE test_1420001 TO read_access;
GRANT ALL ON TABLE test_1420003 TO full_access;
GRANT SELECT ON TABLE test_1420003 TO read_access;
\c - - - :master_port
-- create prepare tests
PREPARE prepare_insert AS INSERT INTO test VALUES ($1);

@ -8,6 +8,10 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
\a\t
SET citus.task_executor_type TO 'real-time';
SET citus.explain_distributed_queries TO on;
VACUUM ANALYZE lineitem_mx;
VACUUM ANALYZE orders_mx;
VACUUM ANALYZE customer_mx;
VACUUM ANALYZE supplier_mx;
\c - - - :worker_1_port
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)

@ -8,6 +8,10 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
\a\t
SET citus.task_executor_type TO 'real-time';
SET citus.explain_distributed_queries TO on;
VACUUM ANALYZE lineitem_mx;
VACUUM ANALYZE orders_mx;
VACUUM ANALYZE customer_mx;
VACUUM ANALYZE supplier_mx;
\c - - - :worker_1_port
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)

@ -129,7 +129,7 @@ BEGIN;
INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
INSERT INTO labs_mx VALUES (5, 'Los Alamos');
COMMIT;
SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5;
id | lab_id | name | id | name
----+--------+-------------------+----+------------
8 | 5 | Douglas Engelbart | 5 | Los Alamos
@ -147,7 +147,7 @@ BEGIN;
INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
INSERT INTO labs_mx VALUES (5, 'Los Alamos');
COMMIT;
SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5;
id | lab_id | name | id | name
----+--------+-------------------+----+------------
8 | 5 | Douglas Engelbart | 5 | Los Alamos

@ -156,10 +156,12 @@ INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo');
INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo');
INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo');
SET client_min_messages = LOG;
-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." While removing broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
ON repartition_udt.pk = repartition_udt_other.pk
WHERE repartition_udt.pk > 1;
WHERE repartition_udt.pk = 1;
pk | udtcol | txtcol | pk | udtcol | txtcol
----+--------+--------+----+--------+--------
(0 rows)

@ -7,11 +7,12 @@ SET client_min_messages = LOG;
SET citus.large_table_shard_count = 1;
SET citus.task_executor_type = 'task-tracker';
SET citus.log_multi_join_order = true;
-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." In order to remove broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
ON repartition_udt.pk = repartition_udt_other.pk
WHERE repartition_udt.pk > 1;
LOG: join order: [ "repartition_udt" ][ local partition join "repartition_udt_other" ]
WHERE repartition_udt.pk = 1;
pk | udtcol | txtcol | pk | udtcol | txtcol
----+--------+--------+----+--------+--------
(0 rows)

@ -7,11 +7,12 @@ SET client_min_messages = LOG;
SET citus.large_table_shard_count = 1;
SET citus.task_executor_type = 'task-tracker';
SET citus.log_multi_join_order = true;
-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." In order to remove broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
ON repartition_udt.pk = repartition_udt_other.pk
WHERE repartition_udt.pk > 1;
LOG: join order: [ "repartition_udt" ][ local partition join "repartition_udt_other" ]
WHERE repartition_udt.pk = 1;
pk | udtcol | txtcol | pk | udtcol | txtcol
----+--------+--------+----+--------+--------
(0 rows)

@ -164,10 +164,12 @@ INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo');
INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo');
INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo');
SET client_min_messages = LOG;
-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." In order to remove broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
ON repartition_udt.pk = repartition_udt_other.pk
WHERE repartition_udt.pk > 1;
WHERE repartition_udt.pk = 1;
pk | udtcol | txtcol | pk | udtcol | txtcol
----+--------+--------+----+--------+--------
(0 rows)

@ -1667,31 +1667,81 @@ DEBUG: Plan is router executable
----+-----------+-------+------------+------+----
(0 rows)

-- multi-shard join is not router plannable
SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id)
-- This query was intended to test "multi-shard join is not router plannable"
-- To run it using repartition join logic we change the join columns
SET citus.task_executor_type to "task-tracker";
SELECT * FROM articles_range ar join authors_range au on (ar.title = au.name)
WHERE ar.author_id = 35;
DEBUG: join prunable for intervals [21,40] and [1,10]
DEBUG: join prunable for intervals [31,40] and [1,10]
DEBUG: join prunable for intervals [31,40] and [11,30]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
id | author_id | title | word_count | name | id
----+-----------+-------+------------+------+----
(0 rows)

-- this is a bug, it is a single shard join query but not router plannable
SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id)
-- This query was intended to test "this is a bug, it is a single shard join
-- query but not router plannable". To run it using repartition join logic we
-- change the join columns.
SELECT * FROM articles_range ar join authors_range au on (ar.title = au.name)
WHERE ar.author_id = 1 or au.id = 5;
DEBUG: join prunable for intervals [1,10] and [11,30]
DEBUG: join prunable for intervals [1,10] and [21,40]
DEBUG: join prunable for intervals [1,10] and [31,40]
DEBUG: join prunable for intervals [11,30] and [1,10]
DEBUG: join prunable for intervals [11,30] and [31,40]
DEBUG: join prunable for intervals [21,40] and [1,10]
DEBUG: join prunable for intervals [31,40] and [1,10]
DEBUG: join prunable for intervals [31,40] and [11,30]
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 20
id | author_id | title | word_count | name | id
----+-----------+-------+------------+------+----
(0 rows)

RESET citus.task_executor_type;
-- bogus query, join on non-partition column, but router plannable due to filters
SELECT * FROM articles_range ar join authors_range au on (ar.id = au.id)
WHERE ar.author_id = 1 and au.id < 10;
@ -1726,21 +1776,21 @@ DEBUG: join prunable for intervals [21,40] and [1,10]
DEBUG: join prunable for intervals [31,40] and [1,10]
DEBUG: join prunable for intervals [31,40] and [11,30]
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 5
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 3
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 7
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 9
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 13
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 16
DETAIL: Creating dependency on merge taskId 7
DEBUG: pruning merge fetch taskId 19
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 22
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 15
DETAIL: Creating dependency on merge taskId 8
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- join between a range partitioned table and reference table is router plannable
@ -1794,7 +1844,7 @@ WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638
WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638
ERROR: failed to execute task 2
ERROR: failed to execute task 1
-- same query with where false but evaluation left to worker
SELECT author_id FROM articles_append
WHERE
@ -1809,7 +1859,7 @@ WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638
WARNING: relation "public.articles_append" does not exist
CONTEXT: while executing command on localhost:57638
ERROR: failed to execute task 2
ERROR: failed to execute task 1
-- same query on router planner with where false but evaluation left to worker
SELECT author_id FROM articles_single_shard_hash
WHERE

@ -215,6 +215,7 @@ SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIK
(0 rows)

-- subqueries are supported in FROM clause
SET citus.large_table_shard_count TO 1;
SELECT articles.id,test.word_count
FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id
ORDER BY articles.id;
@ -272,6 +273,7 @@ ORDER BY articles.id;
50 | 19519
(50 rows)

RESET citus.large_table_shard_count;
-- subqueries are not supported in SELECT clause
SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard a2 WHERE a.id = a2.id LIMIT 1)
AS special_price FROM articles a;
@ -575,21 +577,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 5
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 7
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- system columns from shard tables can be queried and retrieved

@ -212,10 +212,12 @@ SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DE
SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIKE '%a');
ERROR: Complex subqueries and CTEs are not supported when task_executor_type is set to 'task-tracker'
-- subqueries are supported in FROM clause
SET citus.large_table_shard_count TO 1;
SELECT articles.id,test.word_count
FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id
ORDER BY articles.id;
ERROR: Complex subqueries and CTEs are not supported when task_executor_type is set to 'task-tracker'
RESET citus.large_table_shard_count;
-- subqueries are not supported in SELECT clause
SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard a2 WHERE a.id = a2.id LIMIT 1)
AS special_price FROM articles a;
@ -519,21 +521,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DETAIL: Creating dependency on merge taskId 2
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 5
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 5
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 7
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 11
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 12
ERROR: the query contains a join that requires repartitioning
HINT: Set citus.enable_repartition_joins to on to enable repartitioning
-- system columns from shard tables can be queried and retrieved

@ -60,53 +60,6 @@ FROM
l_orderkey) AS unit_prices;
ERROR: cannot perform distributed planning on this query
DETAIL: Cartesian products are currently unsupported
-- this query is only required to execute
-- the following query given that recursive planning
-- (in general real-time queries in transactions)
-- do not execute shard fetch tasks and the next
-- query relies on that
SELECT
l_orderkey,
avg(o_totalprice / l_quantity) AS unit_price
FROM
lineitem_subquery,
orders_subquery
WHERE
l_orderkey = o_custkey
GROUP BY
l_orderkey
ORDER BY 2 DESC, 1 DESC
LIMIT 5;
DEBUG: push down of limit count: 5
l_orderkey | unit_price
------------+------------------------
421 | 102749.853333333333
806 | 96257.4480681818181818
418 | 57151.4156630824373871
1124 | 56102.2804738959822181
230 | 53847.0509778948909754
(5 rows)

SELECT
avg(unit_price)
FROM
(SELECT
l_orderkey,
avg(o_totalprice / l_quantity) AS unit_price
FROM
lineitem_subquery,
orders_subquery
WHERE
l_orderkey = o_custkey
GROUP BY
l_orderkey) AS unit_prices;
DEBUG: generating subplan 7_1 for subquery SELECT lineitem_subquery.l_orderkey, avg((orders_subquery.o_totalprice / lineitem_subquery.l_quantity)) AS unit_price FROM public.lineitem_subquery, public.orders_subquery WHERE (lineitem_subquery.l_orderkey = orders_subquery.o_custkey) GROUP BY lineitem_subquery.l_orderkey
DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT avg(unit_price) AS avg FROM (SELECT intermediate_result.l_orderkey, intermediate_result.unit_price FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint, unit_price numeric)) unit_prices
avg
------------------------
12973.7343244916919367
(1 row)

RESET client_min_messages;
-- Subqueries without relation with a volatile functions (non-constant) are planned recursively
SELECT count(*) FROM (
@ -124,11 +77,11 @@ SELECT count(*) FROM
(SELECT l_orderkey FROM lineitem_subquery) UNION ALL
(SELECT 1::bigint)
) b;
DEBUG: generating subplan 10_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery
DEBUG: generating subplan 7_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 10_2 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION ALL SELECT (1)::bigint AS int8
DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b
DEBUG: generating subplan 7_2 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION ALL SELECT (1)::bigint AS int8
DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('7_2'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b
DEBUG: Creating router plan
DEBUG: Plan is router executable
count
@ -142,12 +95,12 @@ SELECT count(*) FROM
(SELECT l_orderkey FROM lineitem_subquery) UNION
(SELECT l_partkey FROM lineitem_subquery)
) b;
DEBUG: generating subplan 13_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery
DEBUG: generating subplan 13_2 for subquery SELECT l_partkey FROM public.lineitem_subquery
DEBUG: generating subplan 10_1 for subquery SELECT l_orderkey FROM public.lineitem_subquery
DEBUG: generating subplan 10_2 for subquery SELECT l_partkey FROM public.lineitem_subquery
DEBUG: Creating router plan
DEBUG: Plan is router executable
DEBUG: generating subplan 13_3 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('13_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION SELECT intermediate_result.l_partkey FROM read_intermediate_result('13_2'::text, 'binary'::citus_copy_format) intermediate_result(l_partkey integer)
DEBUG: Plan 13 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('13_3'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b
DEBUG: generating subplan 10_3 for subquery SELECT intermediate_result.l_orderkey FROM read_intermediate_result('10_1'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint) UNION SELECT intermediate_result.l_partkey FROM read_intermediate_result('10_2'::text, 'binary'::citus_copy_format) intermediate_result(l_partkey integer)
DEBUG: Plan 10 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.l_orderkey FROM read_intermediate_result('10_3'::text, 'binary'::citus_copy_format) intermediate_result(l_orderkey bigint)) b
DEBUG: Creating router plan
DEBUG: Plan is router executable
count

@ -2228,6 +2228,27 @@ FROM
5 | 0 | 0.50000000000000000000
(5 rows)

-- Getting aggregation of value which is created by aggregation in subquery
SELECT
count(1),
avg(agg_value)
FROM
(SELECT
users_table.user_id,
avg(users_table.value_1 / events_table.value_4) AS agg_value
FROM
users_table,
events_table
WHERE
users_table.user_id = events_table.user_id
GROUP BY
1
) AS temp;
count | avg
-------+-----
6 |
(1 row)

DROP FUNCTION test_join_function_2(integer, integer);
SELECT run_command_on_workers($f$

@ -57,9 +57,9 @@ SET client_min_messages TO DEBUG3;
-- First test the default greedy task assignment policy
SET citus.task_assignment_policy TO 'greedy';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
@ -68,9 +68,9 @@ DEBUG: assigned task 4 to node localhost:57637
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
@ -81,9 +81,9 @@ DEBUG: assigned task 4 to node localhost:57637
-- Next test the first-replica task assignment policy
SET citus.task_assignment_policy TO 'first-replica';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
@ -92,9 +92,9 @@ DEBUG: assigned task 2 to node localhost:57638
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
@ -105,20 +105,9 @@ DEBUG: assigned task 2 to node localhost:57638
|
|||
-- Finally test the round-robin task assignment policy
|
||||
SET citus.task_assignment_policy TO 'round-robin';
|
||||
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
|
||||
DEBUG: assigned task 6 to node localhost:57638
|
||||
DEBUG: assigned task 4 to node localhost:57638
|
||||
DEBUG: assigned task 2 to node localhost:57637
|
||||
QUERY PLAN
|
||||
-----------------------------------------------------------------------
|
||||
Aggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
|
||||
explain statements for distributed queries are not enabled
|
||||
(3 rows)
|
||||
|
||||
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
|
||||
DEBUG: assigned task 6 to node localhost:57637
|
||||
DEBUG: assigned task 4 to node localhost:57637
|
||||
DEBUG: assigned task 3 to node localhost:57638
|
||||
DEBUG: assigned task 2 to node localhost:57638
|
||||
DEBUG: assigned task 1 to node localhost:57637
|
||||
QUERY PLAN
|
||||
-----------------------------------------------------------------------
|
||||
Aggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
|
@ -127,9 +116,20 @@ DEBUG: assigned task 2 to node localhost:57638
|
|||
(3 rows)
|
||||
|
||||
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
|
||||
DEBUG: assigned task 6 to node localhost:57638
|
||||
DEBUG: assigned task 4 to node localhost:57638
|
||||
DEBUG: assigned task 3 to node localhost:57637
|
||||
DEBUG: assigned task 2 to node localhost:57637
|
||||
DEBUG: assigned task 1 to node localhost:57638
|
||||
QUERY PLAN
|
||||
-----------------------------------------------------------------------
|
||||
Aggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
-> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
|
||||
explain statements for distributed queries are not enabled
|
||||
(3 rows)
|
||||
|
||||
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
|
||||
DEBUG: assigned task 3 to node localhost:57638
|
||||
DEBUG: assigned task 2 to node localhost:57638
|
||||
DEBUG: assigned task 1 to node localhost:57637
|
||||
QUERY PLAN
|
||||
-----------------------------------------------------------------------
|
||||
Aggregate (cost=0.00..0.00 rows=0 width=0)
|
||||
|
|
|

@ -63,9 +63,9 @@ DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------

@ -77,9 +77,9 @@ DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------

@ -96,9 +96,9 @@ DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------

@ -110,9 +110,9 @@ DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------

@ -129,23 +129,9 @@ DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
        explain statements for distributed queries are not enabled
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57637
DEBUG: assigned task 4 to node localhost:57637
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------

@ -157,9 +143,23 @@ DEBUG: CommitTransactionCommand
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 6 to node localhost:57638
DEBUG: assigned task 4 to node localhost:57638
DEBUG: assigned task 3 to node localhost:57637
DEBUG: assigned task 2 to node localhost:57637
DEBUG: assigned task 1 to node localhost:57638
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
Aggregate (cost=0.00..0.00 rows=0 width=0)
  -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0)
        explain statements for distributed queries are not enabled
(3 rows)

EXPLAIN SELECT count(*) FROM task_assignment_test_table;
DEBUG: StartTransactionCommand
DEBUG: ProcessUtility
DEBUG: assigned task 3 to node localhost:57638
DEBUG: assigned task 2 to node localhost:57638
DEBUG: assigned task 1 to node localhost:57637
DEBUG: CommitTransactionCommand
QUERY PLAN
-----------------------------------------------------------------------
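
The hunks above exercise citus.task_assignment_policy, which decides which worker placement each read-only task runs on. A minimal sketch of how the three policies are observed, assuming the same illustrative task_assignment_test_table and localhost:57637/57638 workers as the regression test:

-- with client_min_messages at DEBUG3 the planner logs every task assignment
SET client_min_messages TO DEBUG3;
SET citus.task_assignment_policy TO 'greedy';        -- default: spread tasks evenly across placements
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
SET citus.task_assignment_policy TO 'first-replica'; -- always pick the first placement in placement order
EXPLAIN SELECT count(*) FROM task_assignment_test_table;
SET citus.task_assignment_policy TO 'round-robin';   -- rotate across placements between queries
EXPLAIN SELECT count(*) FROM task_assignment_test_table;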

@ -297,21 +297,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 24
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 24
DETAIL: Creating dependency on merge taskId 20
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
DEBUG: generating subplan 53_1 for subquery SELECT t1.x FROM recursive_set_local.test t1, recursive_set_local.test t2 WHERE (t1.x = t2.y) LIMIT 2
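
The "cannot use real time executor with repartition jobs" lines above are a direct consequence of removing broadcast joins: a join on a non-distribution column now has to be repartitioned, which only the task-tracker executor can run. A minimal sketch that triggers the same fallback, reusing the test's schema:

-- the join column y is not the distribution column, so Citus must
-- repartition; with the GUC below it switches to the task-tracker
-- executor instead of raising an error
SET citus.enable_repartition_joins TO on;
SELECT count(*) FROM recursive_set_local.test t1, recursive_set_local.test t2
WHERE t1.x = t2.y;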

@ -760,21 +760,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 24
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 24
DETAIL: Creating dependency on merge taskId 20
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
DEBUG: generating subplan 164_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x = t2.y) LIMIT 0

@ -801,21 +801,21 @@ DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 9
DETAIL: Creating dependency on merge taskId 5
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 14
DETAIL: Creating dependency on merge taskId 10
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 19
DETAIL: Creating dependency on merge taskId 15
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 24
DETAIL: Creating dependency on merge taskId 20
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 24
DETAIL: Creating dependency on merge taskId 20
DEBUG: cannot use real time executor with repartition jobs
HINT: Since you enabled citus.enable_repartition_joins Citus chose to use task-tracker.
DEBUG: generating subplan 167_1 for subquery SELECT t1.x FROM recursive_union.test t1, recursive_union.test t2 WHERE (t1.x = t2.y)

@ -34,8 +34,8 @@ SELECT set_config('citus.shard_replication_factor', '2', false);
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'

-- Place 'right' table only on the primary worker
SELECT set_config('citus.shard_replication_factor', '1', false);
-- Place 'right' table on both workers
SELECT set_config('citus.shard_replication_factor', '2', false);
\copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'

-- Reset shard replication factor to ensure tasks will be assigned to both workers

@ -54,34 +54,8 @@ CREATE TABLE multi_append_table_to_shard_stage
    text TEXT not null
);

COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data';

SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
    pg_dist_shard
WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;

-- Only the primary worker will see the new matches, as the secondary still uses a cached shard
SELECT COUNT(*)
FROM multi_append_table_to_shard_left,
     multi_append_table_to_shard_right
WHERE left_number = right_number;

-- Now add a lot of data to ensure we increase the size on disk
DELETE FROM multi_append_table_to_shard_stage;
COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';

SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
    pg_dist_shard
WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;

-- This join will refresh the shard on the secondary, all 8 rows in the left table will match twice (16)
SELECT COUNT(*)
FROM multi_append_table_to_shard_left,
     multi_append_table_to_shard_right
WHERE left_number = right_number;

-- Check that we error out if we try to append data to a hash partitioned table.
SELECT master_create_empty_shard('multi_append_table_to_shard_right_hash');
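
For context, master_append_table_to_shard (exercised above and in the expected output further below) appends the rows of a regular staging table to an existing shard of an append-distributed table; its return value is, to my understanding, the shard's fill ratio relative to the configured maximum shard size, which is why the expected output shows the ratio growing from 0.0533333 to 0.106667 after the larger load. A condensed sketch of the pattern:

-- stage rows in a plain table, then push them into the shard of the
-- append-distributed target; the result is the post-append fill ratio
COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data';
SELECT master_append_table_to_shard(shardid,
                                    'multi_append_table_to_shard_stage',
                                    'localhost', 57636)
FROM pg_dist_shard
WHERE logicalrelid = 'multi_append_table_to_shard_right'::regclass;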

@ -17,10 +17,15 @@ SET search_path TO public;
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'

\c - - - :worker_2_port
-- and use second worker as well
\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

-- and use coordinator for reference tables
-- These copies were intended to test copying data to a single-sharded table
-- from worker nodes, yet in order to remove broadcast-related logic we change
-- the table to a reference table and copy data from the master. Should be
-- updated when worker nodes gain the capability to run DML on reference tables.
\c - - - :master_port
SET search_path TO public;
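
The comment above captures the interim rule that reference tables are loaded from the coordinator. A minimal sketch of that pattern, assuming customer_mx is (re)created as a reference table elsewhere in the schedule and using the test's file placeholder:

-- reference tables are replicated to every node, so COPY and other DML
-- must currently be issued from the coordinator, not from workers
\c - - - :master_port
\COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'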

@ -159,7 +159,8 @@ test: multi_transaction_recovery
# multi_copy creates hash and range-partitioned tables and performs COPY
# multi_router_planner creates hash partitioned tables.
# ---------
test: multi_copy multi_router_planner
test: multi_copy
test: multi_router_planner

# ----------
# multi_large_shardid loads more lineitem data using high shard identifiers

@ -198,11 +199,6 @@ test: multi_function_evaluation
# ----------
test: multi_truncate

# ----------
# multi_expire_table_cache tests for broadcast tables
# ----------
test: multi_expire_table_cache

# ----------
# multi_colocation_utils tests utility functions written for co-location feature & internal API
# multi_colocated_shard_transfer tests master_copy_shard_placement with colocated tables.

@ -51,11 +51,11 @@ SELECT set_config('citus.shard_replication_factor', '2', false);

\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
-- Place 'right' table only on the primary worker
SELECT set_config('citus.shard_replication_factor', '1', false);
-- Place 'right' table on both workers
SELECT set_config('citus.shard_replication_factor', '2', false);
 set_config
------------
          1
          2
(1 row)

\copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data'

@ -82,48 +82,7 @@ CREATE TABLE multi_append_table_to_shard_stage
    number INTEGER not null,
    text TEXT not null
);
COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data';
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
    pg_dist_shard
WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;
 master_append_table_to_shard
------------------------------
                    0.0533333
(1 row)

-- Only the primary worker will see the new matches, as the secondary still uses a cached shard
SELECT COUNT(*)
FROM multi_append_table_to_shard_left,
     multi_append_table_to_shard_right
WHERE left_number = right_number;
 count
-------
    12
(1 row)

-- Now add a lot of data to ensure we increase the size on disk
DELETE FROM multi_append_table_to_shard_stage;
COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';
SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
FROM
    pg_dist_shard
WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;
 master_append_table_to_shard
------------------------------
                     0.106667
(1 row)

-- This join will refresh the shard on the secondary, all 8 rows in the left table will match twice (16)
SELECT COUNT(*)
FROM multi_append_table_to_shard_left,
     multi_append_table_to_shard_right
WHERE left_number = right_number;
 count
-------
    16
(1 row)

-- Check that we error out if we try to append data to a hash partitioned table.
SELECT master_create_empty_shard('multi_append_table_to_shard_right_hash');
ERROR: relation "multi_append_table_to_shard_right_hash" is a hash partitioned table

@ -12,9 +12,14 @@ SET search_path TO citus_mx_test_schema;
SET search_path TO public;
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
\COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
\c - - - :worker_2_port
-- and use second worker as well
\COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
-- and use coordinator for reference tables
-- These copies were intended to test copying data to a single-sharded table
-- from worker nodes, yet in order to remove broadcast-related logic we change
-- the table to a reference table and copy data from the master. Should be
-- updated when worker nodes gain the capability to run DML on reference tables.
\c - - - :master_port
SET search_path TO public;
\COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'

@ -1,53 +0,0 @@
--
-- MULTI_EXPIRE_TABLE_CACHE
--


SET citus.next_shard_id TO 1220000;

-- create test table
CREATE TABLE large_table(a int, b int);
SELECT master_create_distributed_table('large_table', 'a', 'hash');
SELECT master_create_worker_shards('large_table', 8, 1);

CREATE TABLE broadcast_table(a int, b int);
SELECT master_create_distributed_table('broadcast_table', 'a', 'hash');
SELECT master_create_worker_shards('broadcast_table', 2, 1);

-- verify only small tables are supported
SELECT master_expire_table_cache('large_table');
SELECT master_expire_table_cache('broadcast_table');

-- run a join so that broadcast tables are cached on other workers
SELECT * from large_table l, broadcast_table b where l.a = b.b;

-- insert some data
INSERT INTO large_table VALUES(1, 1);
INSERT INTO large_table VALUES(1, 2);
INSERT INTO large_table VALUES(2, 1);
INSERT INTO large_table VALUES(2, 2);
INSERT INTO large_table VALUES(3, 1);
INSERT INTO large_table VALUES(3, 2);

INSERT INTO broadcast_table VALUES(1, 1);

-- verify returned results are wrong
SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;

-- expire cache and re-run, results should be correct this time
SELECT master_expire_table_cache('broadcast_table');

SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;

-- insert some more data into broadcast table
INSERT INTO broadcast_table VALUES(2, 2);

-- run the same query, get wrong results
SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;

-- expire cache and re-run, results should be correct this time
SELECT master_expire_table_cache('broadcast_table');
SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b;


DROP TABLE large_table, broadcast_table;

@ -133,6 +133,7 @@ ALTER EXTENSION citus UPDATE TO '7.2-1';
ALTER EXTENSION citus UPDATE TO '7.2-2';
ALTER EXTENSION citus UPDATE TO '7.2-3';
ALTER EXTENSION citus UPDATE TO '7.3-3';
ALTER EXTENSION citus UPDATE TO '7.4-1';

-- show running version
SHOW citus.version;

@ -111,7 +111,7 @@ INSERT INTO researchers VALUES (8, 5, 'Douglas Engelbart');
INSERT INTO labs VALUES (5, 'Los Alamos');
COMMIT;

SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id;
SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id AND researchers.lab_id = 5;

-- and the other way around is also allowed
BEGIN;

@ -8,7 +8,6 @@ SET citus.next_shard_id TO 1420000;
ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000;

SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 2;

CREATE TABLE test (id integer, val integer);
SELECT create_distributed_table('test', 'id');

@ -33,6 +32,9 @@ CREATE USER no_access;
GRANT ALL ON TABLE test_1420000 TO full_access;
GRANT SELECT ON TABLE test_1420000 TO read_access;

GRANT ALL ON TABLE test_1420002 TO full_access;
GRANT SELECT ON TABLE test_1420002 TO read_access;

\c - - - :worker_2_port
CREATE USER full_access;
CREATE USER read_access;

@ -41,6 +43,9 @@ CREATE USER no_access;
GRANT ALL ON TABLE test_1420001 TO full_access;
GRANT SELECT ON TABLE test_1420001 TO read_access;

GRANT ALL ON TABLE test_1420003 TO full_access;
GRANT SELECT ON TABLE test_1420003 TO read_access;

\c - - - :master_port

-- create prepare tests

@ -12,6 +12,11 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
SET citus.task_executor_type TO 'real-time';
SET citus.explain_distributed_queries TO on;

VACUUM ANALYZE lineitem_mx;
VACUUM ANALYZE orders_mx;
VACUUM ANALYZE customer_mx;
VACUUM ANALYZE supplier_mx;

\c - - - :worker_1_port
-- Function that parses explain output as JSON
CREATE FUNCTION explain_json(query text)

@ -116,7 +116,7 @@ INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
INSERT INTO labs_mx VALUES (5, 'Los Alamos');
COMMIT;

SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5;

-- and the other way around is also allowed
BEGIN;

@ -132,7 +132,7 @@ INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart');
INSERT INTO labs_mx VALUES (5, 'Los Alamos');
COMMIT;

SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id;
SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id and researchers_mx.lab_id = 5;

-- and the other way around is also allowed
BEGIN;

@ -192,10 +192,12 @@ INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo');

SET client_min_messages = LOG;

-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." While removing broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.pk = repartition_udt_other.pk
    WHERE repartition_udt.pk > 1;
    WHERE repartition_udt.pk = 1;

-- Query that should result in a repartition join on UDT column.
SET citus.large_table_shard_count = 1;
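
For context on "router plannable" in the new comment: when an equality filter on the distribution column (pk here) prunes every relation in the query to a single shard, Citus ships the whole query to one worker as a router query instead of planning a repartition join. A sketch of the contrast, using the test's tables:

-- pk > 1 spans several shards of both tables, which without broadcast
-- joins would require a repartition; pk = 1 prunes each side to one
-- shard, so the join runs as a single router query on that worker
SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.pk = repartition_udt_other.pk
    WHERE repartition_udt.pk = 1;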

@ -9,10 +9,12 @@ SET citus.large_table_shard_count = 1;
SET citus.task_executor_type = 'task-tracker';
SET citus.log_multi_join_order = true;

-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." In order to remove broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.pk = repartition_udt_other.pk
    WHERE repartition_udt.pk > 1;
    WHERE repartition_udt.pk = 1;

SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.udtcol = repartition_udt_other.udtcol

@ -9,10 +9,12 @@ SET citus.large_table_shard_count = 1;
SET citus.task_executor_type = 'task-tracker';
SET citus.log_multi_join_order = true;

-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." In order to remove broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.pk = repartition_udt_other.pk
    WHERE repartition_udt.pk > 1;
    WHERE repartition_udt.pk = 1;

SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.udtcol = repartition_udt_other.udtcol

@ -189,10 +189,12 @@ INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo');

SET client_min_messages = LOG;

-- Query that should result in a repartition join on int column, and be empty.
-- This query was intended to test "Query that should result in a repartition
-- join on int column, and be empty." In order to remove broadcast logic, we
-- manually make the query router plannable.
SELECT * FROM repartition_udt JOIN repartition_udt_other
    ON repartition_udt.pk = repartition_udt_other.pk
    WHERE repartition_udt.pk > 1;
    WHERE repartition_udt.pk = 1;

-- Query that should result in a repartition join on UDT column.
SET citus.large_table_shard_count = 1;

@ -788,13 +788,18 @@ SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id)
SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id)
    WHERE ar.author_id = 1 and au.id = 2;

-- multi-shard join is not router plannable
SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id)
-- This query was intended to test "multi-shard join is not router plannable".
-- To run it using the repartition join logic we change the join columns.
SET citus.task_executor_type to "task-tracker";
SELECT * FROM articles_range ar join authors_range au on (ar.title = au.name)
    WHERE ar.author_id = 35;

-- this is a bug, it is a single shard join query but not router plannable
SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id)
-- This query was intended to test "this is a bug, it is a single shard join
-- query but not router plannable". To run it using the repartition join logic
-- we change the join columns.
SELECT * FROM articles_range ar join authors_range au on (ar.title = au.name)
    WHERE ar.author_id = 1 or au.id = 5;
RESET citus.task_executor_type;

-- bogus query, join on non-partition column, but router plannable due to filters
SELECT * FROM articles_range ar join authors_range au on (ar.id = au.id)

@ -132,10 +132,15 @@ SELECT * FROM articles, position('om' in 'Thomas') ORDER BY 2 DESC, 1 DESC, 3 DE
SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIKE '%a');

-- subqueries are supported in FROM clause

SET citus.large_table_shard_count TO 1;

SELECT articles.id, test.word_count
FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id
ORDER BY articles.id;

RESET citus.large_table_shard_count;

-- subqueries are not supported in SELECT clause
SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard a2 WHERE a.id = a2.id LIMIT 1)
    AS special_price FROM articles a;

@ -55,38 +55,6 @@ FROM
GROUP BY
    l_orderkey) AS unit_prices;

-- this query is only required to execute
-- the following query given that recursive planning
-- (in general real-time queries in transactions)
-- does not execute shard fetch tasks and the next
-- query relies on that
SELECT
    l_orderkey,
    avg(o_totalprice / l_quantity) AS unit_price
FROM
    lineitem_subquery,
    orders_subquery
WHERE
    l_orderkey = o_custkey
GROUP BY
    l_orderkey
ORDER BY 2 DESC, 1 DESC
LIMIT 5;

SELECT
    avg(unit_price)
FROM
    (SELECT
        l_orderkey,
        avg(o_totalprice / l_quantity) AS unit_price
    FROM
        lineitem_subquery,
        orders_subquery
    WHERE
        l_orderkey = o_custkey
    GROUP BY
        l_orderkey) AS unit_prices;

RESET client_min_messages;

-- Subqueries without a relation that use volatile (non-constant) functions are planned recursively

@ -1821,6 +1821,24 @@ FROM
ORDER BY 1,2
LIMIT 5;

-- Getting aggregation of value which is created by aggregation in subquery
SELECT
    count(1),
    avg(agg_value)
FROM
    (SELECT
        users_table.user_id,
        avg(users_table.value_1 / events_table.value_4) AS agg_value
    FROM
        users_table,
        events_table
    WHERE
        users_table.user_id = events_table.user_id
    GROUP BY
        1
    ) AS temp;


DROP FUNCTION test_join_function_2(integer, integer);