mirror of https://github.com/citusdata/citus.git
Merge pull request #1208 from citusdata/remove_job_id_seq
Stop using a sequence to generate job IDs

commit 065d167d2e
@@ -225,8 +225,7 @@ MultiRealTimeExecute(Job *job)
 	 */
 	if (taskFailed)
 	{
-		ereport(ERROR, (errmsg("failed to execute job " UINT64_FORMAT, job->jobId),
-						errdetail("Failure due to failed task %u", failedTaskId)));
+		ereport(ERROR, (errmsg("failed to execute task %u", failedTaskId)));
 	}
 	else if (QueryCancelPending)
 	{
@@ -150,7 +150,6 @@ MultiTaskTrackerExecute(Job *job)
 	ListCell *taskAndExecutionCell = NULL;
 	uint32 taskTrackerCount = 0;
 	uint32 topLevelTaskCount = 0;
-	uint64 failedJobId = 0;
 	uint32 failedTaskId = 0;
 	bool allTasksCompleted = false;
 	bool taskFailed = false;
@@ -279,7 +278,6 @@ MultiTaskTrackerExecute(Job *job)
 		taskFailed = TaskExecutionFailed(taskExecution);
 		if (taskFailed)
 		{
-			failedJobId = taskExecution->jobId;
 			failedTaskId = taskExecution->taskId;
 			break;
 		}
@@ -336,7 +334,6 @@ MultiTaskTrackerExecute(Job *job)
 		taskTransmitFailed = TaskExecutionFailed(taskExecution);
 		if (taskTransmitFailed)
 		{
-			failedJobId = taskExecution->jobId;
 			failedTaskId = taskExecution->taskId;
 			break;
 		}
@@ -415,13 +412,11 @@ MultiTaskTrackerExecute(Job *job)
 	 */
 	if (taskFailed)
 	{
-		ereport(ERROR, (errmsg("failed to execute job " UINT64_FORMAT, failedJobId),
-						errdetail("Failure due to failed task %u", failedTaskId)));
+		ereport(ERROR, (errmsg("failed to execute task %u", failedTaskId)));
 	}
 	else if (clusterFailed)
 	{
-		ereport(ERROR, (errmsg("failed to execute job " UINT64_FORMAT, job->jobId),
-						errdetail("Too many task tracker failures")));
+		ereport(ERROR, (errmsg("failed to execute task %u", failedTaskId)));
 	}
 	else if (QueryCancelPending)
 	{
@@ -22,6 +22,7 @@
 #include "access/heapam.h"
 #include "access/nbtree.h"
 #include "access/skey.h"
+#include "access/xlog.h"
 #include "catalog/pg_am.h"
 #include "catalog/pg_operator.h"
 #include "catalog/pg_type.h"
@@ -63,6 +64,7 @@
 
 /* Policy to use when assigning tasks to worker nodes */
 int TaskAssignmentPolicy = TASK_ASSIGNMENT_GREEDY;
+bool EnableUniqueJobIds = true;
 
 
 /*
@@ -1693,41 +1695,61 @@ ChildNodeList(MultiNode *multiNode)
 
 /*
  * UniqueJobId allocates and returns a unique jobId for the job to be executed.
- * This allocation occurs both in shared memory and in write ahead logs; writing
- * to logs avoids the risk of having jobId collisions.
  *
- * Please note that the jobId sequence wraps around after 2^32 integers. This
- * leaves the upper 32-bits to slave nodes and their jobs.
+ * The resulting job ID is built up as:
+ * <16-bit group ID><24-bit process ID><1-bit secondary flag><23-bit local counter>
+ *
+ * When citus.enable_unique_job_ids is off then only the local counter is
+ * included to get repeatable results.
  */
 static uint64
 UniqueJobId(void)
 {
-	text *sequenceName = cstring_to_text(JOBID_SEQUENCE_NAME);
-	Oid sequenceId = ResolveRelationId(sequenceName);
-	Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId);
-	Datum jobIdDatum = 0;
-	int64 jobId = 0;
-	int64 localizedJobId = 0;
-	int64 localGroupId = GetLocalGroupId();
-	Oid savedUserId = InvalidOid;
-	int savedSecurityContext = 0;
+	static uint32 jobIdCounter = 0;
 
-	GetUserIdAndSecContext(&savedUserId, &savedSecurityContext);
-	SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE);
+	uint64 jobId = 0;
+	uint64 jobIdNumber = 0;
+	uint64 processId = 0;
+	uint64 localGroupId = 0;
 
-	/* generate new and unique jobId from sequence */
-	jobIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum);
-	jobId = DatumGetInt64(jobIdDatum);
+	jobIdCounter++;
+
+	if (EnableUniqueJobIds)
+	{
+		/*
+		 * Add the local group id information to the jobId to
+		 * prevent concurrent jobs on different groups to conflict.
+		 */
+		localGroupId = GetLocalGroupId() & 0xFF;
+		jobId = jobId | (localGroupId << 48);
+
+		/*
+		 * Add the current process ID to distinguish jobs by this
+		 * backends from jobs started by other backends. Process
+		 * IDs can have at most 24-bits on platforms supported by
+		 * Citus.
+		 */
+		processId = MyProcPid & 0xFFFFFF;
+		jobId = jobId | (processId << 24);
+
+		/*
+		 * Add an extra bit for secondaries to distinguish their
+		 * jobs from primaries.
+		 */
+		if (RecoveryInProgress())
+		{
+			jobId = jobId | (1 << 23);
+		}
+	}
 
 	/*
-	 * Add the local group id information to the jobId to
-	 * prevent concurrent jobs on different groups to conflict.
+	 * Use the remaining 23 bits to distinguish jobs by the
+	 * same backend.
 	 */
-	localizedJobId = jobId | (localGroupId << 32);
+	jobIdNumber = jobIdCounter & 0x1FFFFFF;
+	jobId = jobId | jobIdNumber;
 
-	SetUserIdAndSecContext(savedUserId, savedSecurityContext);
-
-	return localizedJobId;
+	return jobId;
 }
 
 
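The new job ID layout can be illustrated with a small standalone sketch (plain C, separate from the Citus sources; ComposeJobId and the input values are hypothetical, while the masks and shifts mirror the patch above):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Illustrative only: compose a job ID the way the patch above does. */
static uint64_t
ComposeJobId(uint64_t groupId, uint64_t processId, int isSecondary, uint32_t counter)
{
	uint64_t jobId = 0;

	jobId |= (groupId & 0xFF) << 48;        /* local group ID */
	jobId |= (processId & 0xFFFFFF) << 24;  /* backend process ID */
	if (isSecondary)
	{
		jobId |= UINT64_C(1) << 23;         /* secondary (recovery) flag */
	}
	jobId |= counter & 0x1FFFFFF;           /* per-backend counter */

	return jobId;
}

int
main(void)
{
	/* made-up inputs: group 3, pid 12345, primary node, 42nd job of this backend */
	printf("job id: %" PRIu64 "\n", ComposeJobId(3, 12345, 0, 42));
	return 0;
}

With citus.enable_unique_job_ids off, only the counter term applies, which is why the regression test output further below sees small, repeatable job IDs (1, 2, 3).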
@@ -2144,8 +2166,7 @@ SubquerySqlTaskList(Job *job)
 		sqlTask->dependedTaskList = dataFetchTaskList;
 
 		/* log the query string we generated */
-		ereport(DEBUG4, (errmsg("generated sql query for job " UINT64_FORMAT
-								" and task %d", sqlTask->jobId, sqlTask->taskId),
+		ereport(DEBUG4, (errmsg("generated sql query for task %d", sqlTask->taskId),
 						 errdetail("query string: \"%s\"", sqlQueryString->data)));
 
 		sqlTask->anchorShardId = AnchorShardId(fragmentCombination, anchorRangeTableId);
@@ -2260,8 +2281,7 @@ SqlTaskList(Job *job)
 		sqlTask->dependedTaskList = dataFetchTaskList;
 
 		/* log the query string we generated */
-		ereport(DEBUG4, (errmsg("generated sql query for job " UINT64_FORMAT
-								" and task %d", sqlTask->jobId, sqlTask->taskId),
+		ereport(DEBUG4, (errmsg("generated sql query for task %d", sqlTask->taskId),
 						 errdetail("query string: \"%s\"", sqlQueryString->data)));
 
 		sqlTask->anchorShardId = INVALID_SHARD_ID;
@@ -634,6 +634,18 @@ RegisterCitusConfigVariables(void)
 		GUC_NO_SHOW_ALL,
 		NULL, NULL, NULL);
 
+	DefineCustomBoolVariable(
+		"citus.enable_unique_job_ids",
+		gettext_noop("Enables unique job IDs by prepending the local process ID and "
+					 "group ID. This should usually be enabled, but can be disabled "
+					 "for repeatable output in regression tests."),
+		NULL,
+		&EnableUniqueJobIds,
+		true,
+		PGC_USERSET,
+		GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL,
+		NULL, NULL, NULL);
+
 	/* warn about config items in the citus namespace that are not registered above */
 	EmitWarningsOnPlaceholders("citus");
 }
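For readers unfamiliar with the ten positional arguments of DefineCustomBoolVariable() used above, a minimal sketch of the same registration pattern for a hypothetical extension follows (my_extension.enable_feature and EnableFeature are illustrative names, not part of the patch):

#include "postgres.h"
#include "fmgr.h"
#include "utils/guc.h"

PG_MODULE_MAGIC;

/* backing variable the GUC writes into, typically declared extern in a header */
static bool EnableFeature = true;

void _PG_init(void);

void
_PG_init(void)
{
	/* register a boolean setting named my_extension.enable_feature */
	DefineCustomBoolVariable(
		"my_extension.enable_feature",
		gettext_noop("Toggles the hypothetical feature."),
		NULL,                 /* no long description */
		&EnableFeature,       /* value is stored here */
		true,                 /* default value */
		PGC_USERSET,          /* any session may change it */
		0,                    /* no special GUC flags */
		NULL, NULL, NULL);    /* no check/assign/show hooks */
}

The address passed as the fourth argument is the C variable the setting writes into; in the patch that is the new EnableUniqueJobIds global declared extern in the planner header below.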
@@ -28,7 +28,6 @@
 
 
 /* Definitions local to the physical planner */
-#define JOBID_SEQUENCE_NAME "pg_dist_jobid_seq"
 #define ARRAY_OUT_FUNC_ID 751
 #define NON_PRUNABLE_JOIN -1
 #define RESERVED_HASHED_COLUMN_ID MaxAttrNumber
@@ -244,6 +243,8 @@ typedef struct OperatorCacheEntry
 
 /* Config variable managed via guc.c */
 extern int TaskAssignmentPolicy;
+extern bool EnableUniqueJobIds;
+
 
 /* Function declarations for building physical plans and constructing queries */
 extern MultiPlan * MultiPhysicalPlanCreate(MultiTreeRoot *multiTree);
@@ -2,7 +2,6 @@
 -- MULTI_AGG_APPROXIMATE_DISTINCT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 340000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 340000;
 -- Create HLL extension if present, print false result otherwise
 SELECT CASE WHEN COUNT(*) > 0 THEN
 'CREATE EXTENSION HLL'
@@ -2,7 +2,6 @@
 -- MULTI_AGG_APPROXIMATE_DISTINCT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 340000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 340000;
 -- Create HLL extension if present, print false result otherwise
 SELECT CASE WHEN COUNT(*) > 0 THEN
 'CREATE EXTENSION HLL'
@@ -2,7 +2,6 @@
 -- MULTI_ARRAY_AGG
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 520000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 520000;
 CREATE OR REPLACE FUNCTION array_sort (ANYARRAY)
 RETURNS ANYARRAY LANGUAGE SQL
 AS $$
@@ -5,7 +5,6 @@
 -- expression, and can be anywhere in the projection order. This is in response
 -- to a bug we had due to the average expression introducing new columns.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 450000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 450000;
 SELECT
 	sum(l_quantity) as sum_qty,
 	sum(l_extendedprice) as sum_base_price,
@@ -22,6 +22,16 @@ SELECT avg(l_extendedprice) FROM lineitem;
  38141.835375000000
 (1 row)
 
+-- Verify that we can do queries in read-only mode
+BEGIN;
+SET TRANSACTION READ ONLY;
+SELECT count(*) FROM lineitem;
+ count
+-------
+ 12000
+(1 row)
+
+COMMIT;
 -- Verify temp tables which are used for final result aggregation don't persist.
 SELECT count(*) FROM pg_class WHERE relname LIKE 'pg_merge_job_%' AND relkind = 'r';
  count
@@ -2,7 +2,6 @@
 -- MULTI_BINARY_MASTER_COPY
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 430000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 430000;
 -- Try binary master copy for different executors
 SET citus.binary_master_copy_format TO 'on';
 SET citus.task_executor_type TO 'task-tracker';
@@ -4,7 +4,6 @@
 -- tests UDFs created for citus tools
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
 -- test with invalid port, prevent OS dependent warning from being displayed
 SET client_min_messages to ERROR;
 -- PG 9.5 does not show context for plpgsql raise
@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1220000;
 -- Tests functions related to cluster membership
 -- before starting the test, lets try to create reference table and see a
 -- meaningful error
@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
 -- ===================================================================
 -- create test utility function
 -- ===================================================================
@@ -2,7 +2,6 @@
 -- MULTI_COMPLEX_EXPRESSIONS
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 420000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 420000;
 -- Check that we can correctly handle complex expressions and aggregates.
 SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem;
  ?column?
@@ -2,7 +2,6 @@
 -- MULTI_COUNT_TYPE_CONVERSION
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 400000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 400000;
 -- Verify that we can sort count(*) results correctly. We perform this check as
 -- our count() operations execute in two steps: worker nodes report their
 -- count() results, and the master node sums these counts up. During this sum(),
@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 390000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 390000;
 -- ===================================================================
 -- get ready for the foreign data wrapper tests
 -- ===================================================================
@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 380000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 380000;
 -- ===================================================================
 -- test INSERT proxy creation functionality
 -- ===================================================================
@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 370000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 370000;
 -- ===================================================================
 -- create test functions and types needed for tests
 -- ===================================================================
@@ -2,7 +2,6 @@
 -- MULTI_CREATE_TABLE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 360000;
 -- Create new table definitions for use in testing in distributed planning and
 -- execution functionality. Also create indexes to boost performance.
 CREATE TABLE lineitem (
@@ -2,7 +2,6 @@
 -- MULTI_CREATE_TABLE_CONSTRAINTS
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 365000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 365000;
 -- test that Citus forbids unique and EXCLUDE constraints on append-partitioned tables.
 CREATE TABLE uniq_cns_append_tables
 (
@@ -3,7 +3,6 @@
 -- create, distribute, INSERT, SELECT and UPDATE
 -- ===================================================================
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 530000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 530000;
 -- create a custom type...
 CREATE TYPE test_composite_type AS (
     i integer,
@@ -2,7 +2,6 @@
 -- MULTI_DEPARSE_SHARD_QUERY
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13100000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 13100000;
 CREATE FUNCTION deparse_shard_query_test(text)
 	RETURNS VOID
 	AS 'citus'
@@ -2,7 +2,6 @@
 -- create test functions
 -- ===================================================================
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 540000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 540000;
 CREATE FUNCTION load_shard_id_array(regclass)
 	RETURNS bigint[]
 	AS 'citus'
@@ -3,7 +3,6 @@
 --
 -- Tests around dropping and recreating the extension
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 550000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 550000;
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
 SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
  master_create_distributed_table
@@ -1,7 +1,6 @@
 -- Tests that check that our query functionality behaves as expected when the
 -- table schema is modified via ALTER statements.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 620000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 620000;
 SELECT count(*) FROM customer;
  count
 -------
@@ -2,7 +2,6 @@
 --- MULTI_EXPIRE_TABLE_CACHE
 ---
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1220000;
 -- create test table
 CREATE TABLE large_table(a int, b int);
 SELECT master_create_distributed_table('large_table', 'a', 'hash');
@@ -2,7 +2,6 @@
 -- MULTI_EXPLAIN
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000;
 -- print major version to make version-specific tests clear
 SELECT substring(version(), '\d+\.\d+') AS major_version;
  major_version
@@ -2,7 +2,6 @@
 -- MULTI_EXPLAIN
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 570000;
 -- print major version to make version-specific tests clear
 SELECT substring(version(), '\d+\.\d+') AS major_version;
  major_version
@@ -2,7 +2,6 @@
 -- MULTI_FOREIGN_KEY
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1350000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1350000;
 -- set shard_count to 4 for faster tests, because we create/drop lots of shards in this test.
 SET citus.shard_count TO 4;
 -- create tables
@@ -2,7 +2,6 @@
 -- MULTI_FUNCTION_EVALUATION
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1200000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1200000;
 -- nextval() works (no good way to test DEFAULT, or, by extension, SERIAL)
 CREATE TABLE example (key INT, value INT);
 SELECT master_create_distributed_table('example', 'key', 'hash');
@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 610000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 610000;
 -- ===================================================================
 -- create test functions
 -- ===================================================================
@@ -3,7 +3,6 @@
 --
 -- Tests for shard and join pruning logic on hash partitioned tables.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 630000;
 -- Create a table partitioned on integer column and update partition type to
 -- hash. Then load data into this table and update shard min max values with
 -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026,
@@ -4,7 +4,6 @@
 -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed
 -- tables.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 640000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 640000;
 --
 -- CREATE TEST TABLES
 --
@@ -2,7 +2,6 @@
 -- MULTI_INSERT_SELECT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13300000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 13300000;
 -- create co-located tables
 SET citus.shard_count = 4;
 SET citus.shard_replication_factor = 2;
@@ -2,7 +2,6 @@
 -- MULTI_JOIN_ORDER_ADDITIONAL
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 650000;
 -- Set configuration to print table join order and pruned shards
 SET citus.explain_distributed_queries TO off;
 SET citus.log_multi_join_order TO TRUE;
@@ -2,7 +2,6 @@
 -- MULTI_JOIN_ORDER_TPCH_LARGE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 660000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 660000;
 -- Enable configuration to print table join order
 SET citus.explain_distributed_queries TO off;
 SET citus.log_multi_join_order TO TRUE;
@@ -1,7 +1,6 @@
 --
 -- MULTI_JOIN_PRUNING
 --
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 680000;
 -- Check that join-pruning works for joins between two large relations. For now
 -- we only check for join-pruning between locally partitioned relations. In the
 -- future we want to check for pruning between re-partitioned relations as well.
@@ -6,7 +6,7 @@
 -- transaction ids in them. Also, we set the executor type to task tracker
 -- executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 690000;
+SET citus.enable_unique_job_ids TO off;
 BEGIN;
 SET client_min_messages TO DEBUG4;
 DEBUG: CommitTransactionCommand
@@ -21,10 +21,6 @@ DEBUG: CommitTransactionCommand
 -- Debug4 log messages display jobIds within them. We explicitly set the jobId
 -- sequence here so that the regression output becomes independent of the number
 -- of jobs executed prior to running this test.
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: CommitTransactionCommand
 -- Multi-level repartition join to verify our projection columns are correctly
 -- referenced and propagated across multiple repartition jobs. The test also
 -- validates that only the minimal necessary projection columns are transferred
@@ -53,21 +49,21 @@ DEBUG: join prunable for intervals [8997,10560] and [1,5986]
 DEBUG: join prunable for intervals [10560,12036] and [1,5986]
 DEBUG: join prunable for intervals [12036,13473] and [1,5986]
 DEBUG: join prunable for intervals [13473,14947] and [1,5986]
-DEBUG: generated sql query for job 1250 and task 3
+DEBUG: generated sql query for task 3
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
-DEBUG: generated sql query for job 1250 and task 6
+DEBUG: generated sql query for task 6
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
-DEBUG: generated sql query for job 1250 and task 9
+DEBUG: generated sql query for task 9
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
-DEBUG: generated sql query for job 1250 and task 12
+DEBUG: generated sql query for task 12
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
-DEBUG: generated sql query for job 1250 and task 15
+DEBUG: generated sql query for task 15
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
-DEBUG: generated sql query for job 1250 and task 18
+DEBUG: generated sql query for task 18
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
-DEBUG: generated sql query for job 1250 and task 21
+DEBUG: generated sql query for task 21
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
-DEBUG: generated sql query for job 1250 and task 24
+DEBUG: generated sql query for task 24
 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))"
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 3 to node localhost:57638
@@ -79,10 +75,10 @@ DEBUG: assigned task 24 to node localhost:57637
 DEBUG: assigned task 21 to node localhost:57638
 DEBUG: join prunable for intervals [1,1000] and [6001,7000]
 DEBUG: join prunable for intervals [6001,7000] and [1,1000]
-DEBUG: generated sql query for job 1251 and task 3
+DEBUG: generated sql query for task 3
-DETAIL: query string: "SELECT "pg_merge_job_1250.task_000025".intermediate_column_1250_0, "pg_merge_job_1250.task_000025".intermediate_column_1250_1, "pg_merge_job_1250.task_000025".intermediate_column_1250_2, "pg_merge_job_1250.task_000025".intermediate_column_1250_3, "pg_merge_job_1250.task_000025".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000025 "pg_merge_job_1250.task_000025" JOIN part_290011 part ON (("pg_merge_job_1250.task_000025".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
+DETAIL: query string: "SELECT "pg_merge_job_0001.task_000025".intermediate_column_1_0, "pg_merge_job_0001.task_000025".intermediate_column_1_1, "pg_merge_job_0001.task_000025".intermediate_column_1_2, "pg_merge_job_0001.task_000025".intermediate_column_1_3, "pg_merge_job_0001.task_000025".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000025 "pg_merge_job_0001.task_000025" JOIN part_290011 part ON (("pg_merge_job_0001.task_000025".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
-DEBUG: generated sql query for job 1251 and task 6
+DEBUG: generated sql query for task 6
-DETAIL: query string: "SELECT "pg_merge_job_1250.task_000034".intermediate_column_1250_0, "pg_merge_job_1250.task_000034".intermediate_column_1250_1, "pg_merge_job_1250.task_000034".intermediate_column_1250_2, "pg_merge_job_1250.task_000034".intermediate_column_1250_3, "pg_merge_job_1250.task_000034".intermediate_column_1250_4 FROM (pg_merge_job_1250.task_000034 "pg_merge_job_1250.task_000034" JOIN part_280002 part ON (("pg_merge_job_1250.task_000034".intermediate_column_1250_0 = part.p_partkey))) WHERE (part.p_size > 8)"
+DETAIL: query string: "SELECT "pg_merge_job_0001.task_000034".intermediate_column_1_0, "pg_merge_job_0001.task_000034".intermediate_column_1_1, "pg_merge_job_0001.task_000034".intermediate_column_1_2, "pg_merge_job_0001.task_000034".intermediate_column_1_3, "pg_merge_job_0001.task_000034".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000034 "pg_merge_job_0001.task_000034" JOIN part_280002 part ON (("pg_merge_job_0001.task_000034".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)"
 DEBUG: pruning merge fetch taskId 1
 DETAIL: Creating dependency on merge taskId 25
 DEBUG: pruning merge fetch taskId 4
@@ -95,12 +91,12 @@ DEBUG: join prunable for intervals [1001,2000] and [1,1000]
 DEBUG: join prunable for intervals [1001,2000] and [6001,7000]
 DEBUG: join prunable for intervals [6001,7000] and [1,1000]
 DEBUG: join prunable for intervals [6001,7000] and [1001,2000]
-DEBUG: generated sql query for job 1252 and task 3
+DEBUG: generated sql query for task 3
-DETAIL: query string: "SELECT "pg_merge_job_1251.task_000007".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000007".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000007 "pg_merge_job_1251.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000007".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000007".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000007".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000007".intermediate_column_1251_0, "pg_merge_job_1251.task_000007".intermediate_column_1251_1"
+DETAIL: query string: "SELECT "pg_merge_job_0002.task_000007".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000007".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000007 "pg_merge_job_0002.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000007".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000007".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000007".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000007".intermediate_column_2_0, "pg_merge_job_0002.task_000007".intermediate_column_2_1"
-DEBUG: generated sql query for job 1252 and task 6
+DEBUG: generated sql query for task 6
-DETAIL: query string: "SELECT "pg_merge_job_1251.task_000010".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000010".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000010 "pg_merge_job_1251.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000010".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000010".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000010".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000010".intermediate_column_1251_0, "pg_merge_job_1251.task_000010".intermediate_column_1251_1"
+DETAIL: query string: "SELECT "pg_merge_job_0002.task_000010".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000010".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000010 "pg_merge_job_0002.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000010".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000010".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000010".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000010".intermediate_column_2_0, "pg_merge_job_0002.task_000010".intermediate_column_2_1"
-DEBUG: generated sql query for job 1252 and task 9
+DEBUG: generated sql query for task 9
-DETAIL: query string: "SELECT "pg_merge_job_1251.task_000013".intermediate_column_1251_0 AS l_partkey, "pg_merge_job_1251.task_000013".intermediate_column_1251_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1251.task_000013 "pg_merge_job_1251.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_1251.task_000013".intermediate_column_1251_4))) WHERE ((("pg_merge_job_1251.task_000013".intermediate_column_1251_2 > 5.0) OR ("pg_merge_job_1251.task_000013".intermediate_column_1251_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_1251.task_000013".intermediate_column_1251_0, "pg_merge_job_1251.task_000013".intermediate_column_1251_1"
+DETAIL: query string: "SELECT "pg_merge_job_0002.task_000013".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000013".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000013 "pg_merge_job_0002.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000013".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000013".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000013".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000013".intermediate_column_2_0, "pg_merge_job_0002.task_000013".intermediate_column_2_1"
 DEBUG: pruning merge fetch taskId 1
 DETAIL: Creating dependency on merge taskId 7
 DEBUG: pruning merge fetch taskId 4
@@ -110,12 +106,12 @@ DETAIL: Creating dependency on merge taskId 13
 DEBUG: assigned task 6 to node localhost:57637
 DEBUG: assigned task 9 to node localhost:57638
 DEBUG: assigned task 3 to node localhost:57637
-DEBUG: completed cleanup query for job 1252
+DEBUG: completed cleanup query for job 3
-DEBUG: completed cleanup query for job 1252
+DEBUG: completed cleanup query for job 3
-DEBUG: completed cleanup query for job 1251
+DEBUG: completed cleanup query for job 2
-DEBUG: completed cleanup query for job 1251
+DEBUG: completed cleanup query for job 2
-DEBUG: completed cleanup query for job 1250
+DEBUG: completed cleanup query for job 1
-DEBUG: completed cleanup query for job 1250
+DEBUG: completed cleanup query for job 1
 DEBUG: CommitTransactionCommand
 l_partkey | o_orderkey | count
-----------+------------+-------
|
@ -162,21 +158,21 @@ GROUP BY
|
||||||
ORDER BY
|
ORDER BY
|
||||||
l_partkey, o_orderkey;
|
l_partkey, o_orderkey;
|
||||||
DEBUG: StartTransactionCommand
|
DEBUG: StartTransactionCommand
|
||||||
DEBUG: generated sql query for job 1253 and task 2
|
DEBUG: generated sql query for task 2
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: generated sql query for job 1253 and task 4
|
DEBUG: generated sql query for task 4
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: generated sql query for job 1253 and task 6
|
DEBUG: generated sql query for task 6
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: generated sql query for job 1253 and task 8
|
DEBUG: generated sql query for task 8
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: generated sql query for job 1253 and task 10
|
DEBUG: generated sql query for task 10
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: generated sql query for job 1253 and task 12
|
DEBUG: generated sql query for task 12
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: generated sql query for job 1253 and task 14
|
DEBUG: generated sql query for task 14
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: generated sql query for job 1253 and task 16
|
DEBUG: generated sql query for task 16
|
||||||
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
|
DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)"
|
||||||
DEBUG: assigned task 4 to node localhost:57637
|
DEBUG: assigned task 4 to node localhost:57637
|
||||||
DEBUG: assigned task 2 to node localhost:57638
|
DEBUG: assigned task 2 to node localhost:57638
|
||||||
|
@@ -186,9 +182,9 @@ DEBUG: assigned task 12 to node localhost:57637
 DEBUG: assigned task 10 to node localhost:57638
 DEBUG: assigned task 16 to node localhost:57637
 DEBUG: assigned task 14 to node localhost:57638
-DEBUG: generated sql query for job 1254 and task 2
+DEBUG: generated sql query for task 2
 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)"
-DEBUG: generated sql query for job 1254 and task 4
+DEBUG: generated sql query for task 4
 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)"
 DEBUG: assigned task 4 to node localhost:57637
 DEBUG: assigned task 2 to node localhost:57638
@@ -204,14 +200,14 @@ DEBUG: join prunable for task partitionId 2 and 3
 DEBUG: join prunable for task partitionId 3 and 0
 DEBUG: join prunable for task partitionId 3 and 1
 DEBUG: join prunable for task partitionId 3 and 2
-DEBUG: generated sql query for job 1255 and task 3
+DEBUG: generated sql query for task 3
-DETAIL: query string: "SELECT "pg_merge_job_1253.task_000017".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000005".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000017 "pg_merge_job_1253.task_000017" JOIN pg_merge_job_1254.task_000005 "pg_merge_job_1254.task_000005" ON (("pg_merge_job_1253.task_000017".intermediate_column_1253_1 = "pg_merge_job_1254.task_000005".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000017".intermediate_column_1253_0, "pg_merge_job_1254.task_000005".intermediate_column_1254_0"
+DETAIL: query string: "SELECT "pg_merge_job_0004.task_000017".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000005".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000017 "pg_merge_job_0004.task_000017" JOIN pg_merge_job_0005.task_000005 "pg_merge_job_0005.task_000005" ON (("pg_merge_job_0004.task_000017".intermediate_column_4_1 = "pg_merge_job_0005.task_000005".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000017".intermediate_column_4_0, "pg_merge_job_0005.task_000005".intermediate_column_5_0"
-DEBUG: generated sql query for job 1255 and task 6
+DEBUG: generated sql query for task 6
-DETAIL: query string: "SELECT "pg_merge_job_1253.task_000026".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000008".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000026 "pg_merge_job_1253.task_000026" JOIN pg_merge_job_1254.task_000008 "pg_merge_job_1254.task_000008" ON (("pg_merge_job_1253.task_000026".intermediate_column_1253_1 = "pg_merge_job_1254.task_000008".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000026".intermediate_column_1253_0, "pg_merge_job_1254.task_000008".intermediate_column_1254_0"
+DETAIL: query string: "SELECT "pg_merge_job_0004.task_000026".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000008".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000026 "pg_merge_job_0004.task_000026" JOIN pg_merge_job_0005.task_000008 "pg_merge_job_0005.task_000008" ON (("pg_merge_job_0004.task_000026".intermediate_column_4_1 = "pg_merge_job_0005.task_000008".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000026".intermediate_column_4_0, "pg_merge_job_0005.task_000008".intermediate_column_5_0"
-DEBUG: generated sql query for job 1255 and task 9
+DEBUG: generated sql query for task 9
-DETAIL: query string: "SELECT "pg_merge_job_1253.task_000035".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000011".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000035 "pg_merge_job_1253.task_000035" JOIN pg_merge_job_1254.task_000011 "pg_merge_job_1254.task_000011" ON (("pg_merge_job_1253.task_000035".intermediate_column_1253_1 = "pg_merge_job_1254.task_000011".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000035".intermediate_column_1253_0, "pg_merge_job_1254.task_000011".intermediate_column_1254_0"
+DETAIL: query string: "SELECT "pg_merge_job_0004.task_000035".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000011".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000035 "pg_merge_job_0004.task_000035" JOIN pg_merge_job_0005.task_000011 "pg_merge_job_0005.task_000011" ON (("pg_merge_job_0004.task_000035".intermediate_column_4_1 = "pg_merge_job_0005.task_000011".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000035".intermediate_column_4_0, "pg_merge_job_0005.task_000011".intermediate_column_5_0"
-DEBUG: generated sql query for job 1255 and task 12
+DEBUG: generated sql query for task 12
-DETAIL: query string: "SELECT "pg_merge_job_1253.task_000044".intermediate_column_1253_0 AS l_partkey, "pg_merge_job_1254.task_000014".intermediate_column_1254_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_1253.task_000044 "pg_merge_job_1253.task_000044" JOIN pg_merge_job_1254.task_000014 "pg_merge_job_1254.task_000014" ON (("pg_merge_job_1253.task_000044".intermediate_column_1253_1 = "pg_merge_job_1254.task_000014".intermediate_column_1254_1))) WHERE true GROUP BY "pg_merge_job_1253.task_000044".intermediate_column_1253_0, "pg_merge_job_1254.task_000014".intermediate_column_1254_0"
+DETAIL: query string: "SELECT "pg_merge_job_0004.task_000044".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000014".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000044 "pg_merge_job_0004.task_000044" JOIN pg_merge_job_0005.task_000014 "pg_merge_job_0005.task_000014" ON (("pg_merge_job_0004.task_000044".intermediate_column_4_1 = "pg_merge_job_0005.task_000014".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000044".intermediate_column_4_0, "pg_merge_job_0005.task_000014".intermediate_column_5_0"
 DEBUG: pruning merge fetch taskId 1
 DETAIL: Creating dependency on merge taskId 17
 DEBUG: pruning merge fetch taskId 2
@ -228,16 +224,16 @@ DEBUG: pruning merge fetch taskId 10
|
||||||
DETAIL: Creating dependency on merge taskId 44
|
DETAIL: Creating dependency on merge taskId 44
|
||||||
DEBUG: pruning merge fetch taskId 11
|
DEBUG: pruning merge fetch taskId 11
|
||||||
DETAIL: Creating dependency on merge taskId 14
|
DETAIL: Creating dependency on merge taskId 14
|
||||||
DEBUG: assigned task 3 to node localhost:57638
|
DEBUG: assigned task 3 to node localhost:57637
|
||||||
DEBUG: assigned task 6 to node localhost:57637
|
DEBUG: assigned task 6 to node localhost:57638
|
||||||
DEBUG: assigned task 9 to node localhost:57638
|
DEBUG: assigned task 9 to node localhost:57637
|
||||||
DEBUG: assigned task 12 to node localhost:57637
|
DEBUG: assigned task 12 to node localhost:57638
|
||||||
DEBUG: completed cleanup query for job 1255
|
DEBUG: completed cleanup query for job 6
|
||||||
DEBUG: completed cleanup query for job 1255
|
DEBUG: completed cleanup query for job 6
|
||||||
DEBUG: completed cleanup query for job 1253
|
DEBUG: completed cleanup query for job 4
|
||||||
DEBUG: completed cleanup query for job 1253
|
DEBUG: completed cleanup query for job 4
|
||||||
DEBUG: completed cleanup query for job 1254
|
DEBUG: completed cleanup query for job 5
|
||||||
DEBUG: completed cleanup query for job 1254
|
DEBUG: completed cleanup query for job 5
|
||||||
DEBUG: CommitTransactionCommand
|
DEBUG: CommitTransactionCommand
|
||||||
l_partkey | o_orderkey | count
|
l_partkey | o_orderkey | count
|
||||||
-----------+------------+-------
|
-----------+------------+-------
|
||||||
|
|
|
@@ -5,7 +5,6 @@
 -- set executor type to task tracker executor here, as we cannot run repartition
 -- jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 700000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 700000;
 SET citus.large_table_shard_count TO 2;
 SET client_min_messages TO DEBUG2;
 SET citus.task_executor_type TO 'task-tracker';

@@ -6,7 +6,6 @@
 -- from a sql task to its depended tasks. Note that we set the executor type to task
 -- tracker executor here, as we cannot run repartition jobs with real time executor.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 710000;
 BEGIN;
 SET client_min_messages TO DEBUG3;
 DEBUG: CommitTransactionCommand
@@ -186,19 +185,6 @@ SET citus.large_table_shard_count TO 2;
 DEBUG: StartTransactionCommand
 DEBUG: ProcessUtility
 DEBUG: CommitTransactionCommand
--- The next test, dual hash repartition join, uses the current jobId to assign
--- tasks in a round-robin fashion. We therefore need to ensure that jobIds start
--- with an odd number here to get consistent test output.
-SELECT case when (currval('pg_dist_jobid_seq') % 2) = 0
-then nextval('pg_dist_jobid_seq') % 2
-else 1 end;
-DEBUG: StartTransactionCommand
-DEBUG: CommitTransactionCommand
-case
-------
-1
-(1 row)
-
 -- Dual hash repartition join which tests the separate hash repartition join
 -- task assignment algorithm.
 SELECT
@@ -247,10 +233,10 @@ DEBUG: pruning merge fetch taskId 10
 DETAIL: Creating dependency on merge taskId 44
 DEBUG: pruning merge fetch taskId 11
 DETAIL: Creating dependency on merge taskId 19
-DEBUG: assigned task 3 to node localhost:57637
+DEBUG: assigned task 3 to node localhost:57638
-DEBUG: assigned task 6 to node localhost:57638
+DEBUG: assigned task 6 to node localhost:57637
-DEBUG: assigned task 9 to node localhost:57637
+DEBUG: assigned task 9 to node localhost:57638
-DEBUG: assigned task 12 to node localhost:57638
+DEBUG: assigned task 12 to node localhost:57637
 DEBUG: CommitTransactionCommand
 count
 -------
@@ -2,7 +2,6 @@
 -- MULTI_LIMIT_CLAUSE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 730000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 730000;
 -- Display debug messages on limit clause push down.
 SET client_min_messages TO DEBUG1;
 -- Check that we can correctly handle the Limit clause in distributed queries.

@@ -2,7 +2,6 @@
 -- MULTI_LIMIT_CLAUSE_APPROXIMATE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 720000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 720000;
 -- Display debug messages on limit clause push down.
 SET client_min_messages TO DEBUG1;
 -- We first look at results with limit optimization disabled. This first query

@@ -3,7 +3,6 @@
 --
 -- Tests that check the metadata returned by the master node.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 740000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 740000;
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
 part_placement_policy FROM master_get_table_metadata('lineitem');
 part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy

@@ -2,7 +2,6 @@
 -- MULTI_METADATA_ACCESS
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1360000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1360000;
 CREATE USER no_access;
 NOTICE: not propagating CREATE ROLE/USER commands to worker nodes
 HINT: Connect to worker nodes directly to manually create all necessary users and roles.

@@ -4,7 +4,6 @@
 -- Tests for metadata snapshot functions, metadata syncing functions and propagation of
 -- metadata changes to MX tables.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
 SELECT nextval('pg_catalog.pg_dist_shard_placement_placementid_seq') AS last_placement_id
 \gset
 ALTER SEQUENCE pg_catalog.pg_dist_shard_placement_placementid_seq RESTART 100000;

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 750000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 750000;
 -- ===================================================================
 -- test end-to-end modification functionality
 -- ===================================================================

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1200000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1200000;
 -- ===================================================================
 -- test end-to-end modification functionality
 -- ===================================================================

@@ -2,7 +2,6 @@
 -- MULTI_MX_CREATE_TABLE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1220000;
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
 start_metadata_sync_to_node
 -----------------------------
@@ -1,6 +1,5 @@
 -- Tests related to distributed DDL commands on mx cluster
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1600000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1600000;
 SELECT * FROM mx_ddl_table ORDER BY key;
 key | value
 -----+-------

@@ -2,11 +2,8 @@
 -- MULTI_MX_EXPLAIN
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
 \c - - - :master_port
 \a\t
 SET citus.task_executor_type TO 'real-time';

@@ -2,11 +2,8 @@
 -- MULTI_MX_EXPLAIN
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1320000;
 \c - - - :master_port
 \a\t
 SET citus.task_executor_type TO 'real-time';

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1330000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1330000;
 -- ===================================================================
 -- test end-to-end modification functionality for mx tables
 -- ===================================================================

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1340000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1340000;
 -- ===================================================================
 -- test end-to-end modification functionality for mx tables in transactions
 -- ===================================================================

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
 \c - - - :master_port
 CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
 SELECT create_reference_table('reference_table_test');

@@ -2,7 +2,6 @@
 -- MULTI_MX_REPARTITION_UDT_PREPARE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
 -- START type creation
 CREATE TYPE test_udt AS (i integer, i2 integer);
 -- ... as well as a function to use as its comparator...
@@ -202,6 +201,4 @@ LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_ot


 \c - - - :worker_1_port
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
 \c - - - :worker_2_port
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000;
 -- ===================================================================
 -- test router planner functionality for single shard select queries
 -- ===================================================================
@@ -2,7 +2,6 @@
 -- MULTI_MX_SCHEMA_SUPPORT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1210000;
 -- connect to a worker node and run some queries
 \c - - - :worker_1_port
 -- test very basic queries

@@ -2,7 +2,6 @@
 -- MULTI_MX_TPCH_QUERY1
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
 -- connect to the coordinator
 \c - - - :master_port
 -- Change configuration to treat lineitem and orders tables as large
@@ -40,7 +39,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #1 from the TPC-H decision support benchmark
@@ -76,7 +74,6 @@ ORDER BY
 -- connect to the other node
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1310000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #1 from the TPC-H decision support benchmark

@@ -5,7 +5,6 @@
 -- we don't set citus.large_table_shard_count here, and instead use the default value
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
 -- connect to master
 \c - - - :master_port
 SELECT
@@ -67,7 +66,6 @@ LIMIT 20;
 -- connect one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
 SELECT
 c_custkey,
 c_name,
@@ -127,7 +125,6 @@ LIMIT 20;
 -- connect to the other worker
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1300000;
 SELECT
 c_custkey,
 c_name,

@@ -2,7 +2,6 @@
 -- MULTI_MX_TPCH_QUERY12
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
 -- connect to the coordinator
 \c - - - :master_port
 -- Change configuration to treat lineitem and orders tables as large
@@ -45,7 +44,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #12 from the TPC-H decision support benchmark
@@ -86,7 +84,6 @@ ORDER BY
 -- connect to the other worker node
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1290000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1290000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #12 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_MX_TPCH_QUERY14
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
 -- connect to the coordinator
 \c - - - :master_port
 -- Change configuration to treat lineitem and orders tables as large
@@ -29,7 +28,6 @@ WHERE
 -- connect one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #14 from the TPC-H decision support benchmark
@@ -54,7 +52,6 @@ WHERE
 -- connect to the other node
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1280000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1280000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #14 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_MX_TPCH_QUERY19
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
 -- connect to the coordinator
 \c - - - :master_port
 -- Change configuration to treat lineitem and orders tables as large
@@ -46,7 +45,6 @@ WHERE
 -- connect one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #19 from the TPC-H decision support benchmark. Note that we modified
@@ -88,7 +86,6 @@ WHERE
 -- connect to the other node
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1270000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #19 from the TPC-H decision support benchmark. Note that we modified
@@ -5,7 +5,6 @@
 -- we don't set citus.large_table_shard_count here, and instead use the default value
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
 -- connect to the coordinator
 \c - - - :master_port
 SELECT
@@ -53,7 +52,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
 SELECT
 l_orderkey,
 sum(l_extendedprice * (1 - l_discount)) as revenue,
@@ -99,7 +97,6 @@ ORDER BY
 -- connect to the other node
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1260000;
 SELECT
 l_orderkey,
 sum(l_extendedprice * (1 - l_discount)) as revenue,

@@ -2,7 +2,6 @@
 -- MULTI_MX_TPCH_QUERY6
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
 -- connect to the coordinator
 \c - - - :master_port
 -- Change configuration to treat lineitem and orders tables as large
@@ -25,7 +24,6 @@ WHERE
 -- connect to one of the worker nodes
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #6 from the TPC-H decision support benchmark
@@ -46,7 +44,6 @@ WHERE
 -- connect to the other worker node
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #6 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_MX_TPCH_QUERY7
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
 -- connect to the coordinator
 \c - - - :master_port
 -- Change configuration to treat lineitem AND orders tables as large
@@ -55,7 +54,6 @@ ORDER BY
 -- connect one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
 -- Change configuration to treat lineitem AND orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #7 from the TPC-H decision support benchmark
@@ -106,7 +104,6 @@ ORDER BY
 -- connect to the other worker node
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
 -- Change configuration to treat lineitem AND orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #7 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_MX_TPCH_QUERY7_NESTED
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
 -- connect to the coordinator
 \c - - - :master_port
 -- Change configuration to treat lineitem AND orders tables AS large
@@ -64,7 +63,6 @@ ORDER BY
 -- connect to one of the workers
 \c - - - :worker_1_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
 -- Change configuration to treat lineitem AND orders tables AS large
 SET citus.large_table_shard_count TO 2;
 -- Query #7 from the TPC-H benchmark; modified to include sub-selects
@@ -124,7 +122,6 @@ ORDER BY
 -- connect to the coordinator
 \c - - - :worker_2_port
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1240000;
 -- Change configuration to treat lineitem AND orders tables AS large
 SET citus.large_table_shard_count TO 2;
 -- Query #7 from the TPC-H benchmark; modified to include sub-selects
@@ -2,7 +2,6 @@
 -- MULTI_NAME_LENGTHS
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 225000;
 SET citus.multi_shard_commit_protocol = '2pc';
 -- Verify that a table name > 56 characters gets hashed properly.
 CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (

@@ -4,7 +4,6 @@
 -- This test checks that we can handle null min/max values in shard statistics
 -- and that we don't partition or join prune shards that have null values.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 760000;
 SET client_min_messages TO DEBUG2;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;

@@ -4,7 +4,6 @@
 -- Tests to verify that we correctly prune unreferenced shards. For this, we
 -- need to increase the logging verbosity of messages displayed on the client.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 770000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 770000;
 SET citus.explain_distributed_queries TO off;
 SET client_min_messages TO DEBUG2;
 -- Adding additional l_orderkey = 1 to make this query not router executable

@@ -5,7 +5,6 @@
 -- and converted into both plain SQL and PL/pgsql functions, which
 -- use prepared statements internally.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 780000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 780000;
 CREATE FUNCTION plpgsql_test_1() RETURNS TABLE(count bigint) AS $$
 DECLARE
 BEGIN

@@ -5,7 +5,6 @@
 -- taken from other regression test files and converted into
 -- prepared statements.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 790000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 790000;
 PREPARE prepared_test_1 AS
 SELECT
 count(*)

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 800000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 800000;
 -- ===================================================================
 -- create test functions
 -- ===================================================================
@@ -6,12 +6,11 @@
 -- the resource owner should automatically clean up these intermediate query
 -- result files.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 810000;
+SET citus.enable_unique_job_ids TO off;
 BEGIN;
 -- pg_ls_dir() displays jobids. We explicitly set the jobId sequence
 -- here so that the regression output becomes independent of the
 -- number of jobs executed prior to running this test.
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250;
 SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem;
 revenue
 ---------------
@@ -218,26 +217,26 @@ FETCH 1 FROM c_19;
 SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f;
 f
 -----------------
-master_job_1256
+master_job_0007
-master_job_1257
+master_job_0008
-master_job_1258
+master_job_0009
-master_job_1259
+master_job_0010
-master_job_1260
+master_job_0011
-master_job_1261
+master_job_0012
-master_job_1262
+master_job_0013
-master_job_1263
+master_job_0014
-master_job_1264
+master_job_0015
-master_job_1265
+master_job_0016
-master_job_1266
+master_job_0017
-master_job_1267
+master_job_0018
-master_job_1268
+master_job_0019
-master_job_1269
+master_job_0020
-master_job_1270
+master_job_0021
-master_job_1271
+master_job_0022
-master_job_1272
+master_job_0023
-master_job_1273
+master_job_0024
-master_job_1274
+master_job_0025
-master_job_1275
+master_job_0026
 (20 rows)

 -- close first, 17th (first after re-alloc) and last cursor.
@@ -247,23 +246,23 @@ CLOSE c_19;
 SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f;
 f
 -----------------
-master_job_1257
+master_job_0008
-master_job_1258
+master_job_0009
-master_job_1259
+master_job_0010
-master_job_1260
+master_job_0011
-master_job_1261
+master_job_0012
-master_job_1262
+master_job_0013
-master_job_1263
+master_job_0014
-master_job_1264
+master_job_0015
-master_job_1265
+master_job_0016
-master_job_1266
+master_job_0017
-master_job_1267
+master_job_0018
-master_job_1268
+master_job_0019
-master_job_1269
+master_job_0020
-master_job_1270
+master_job_0021
-master_job_1271
+master_job_0022
-master_job_1273
+master_job_0024
-master_job_1274
+master_job_0025
 (17 rows)

 ROLLBACK;
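This is the one hunk where the jobId sequence restart is replaced rather than dropped: the test now leans on citus.enable_unique_job_ids instead. A minimal sketch of that pattern, assuming a coordinator with the lineitem table loaded as in this test (both statements below are the ones the expected output above exercises):

SET citus.enable_unique_job_ids TO off;
BEGIN;
-- a distributed query; in this test each such query leaves a job directory behind
SELECT sum(l_extendedprice * l_discount) AS revenue FROM lineitem;
-- with unique job IDs off, the directories carry small, repeatable suffixes
-- (master_job_0007 and up in the expected output above), presumably because the
-- IDs are no longer drawn from a cluster-wide sequence, so the listing does not
-- depend on how many jobs ran before the test
SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f;
ROLLBACK;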
@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1250000;
 CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
 -- insert some data, and make sure that cannot be create_distributed_table
 INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01');

@@ -3,7 +3,6 @@
 --
 -- Tests that check the metadata after master_remove_node.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1380000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1380000;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1380000;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1380000;
 ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1380000;

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 820000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 820000;
 -- ===================================================================
 -- test shard repair functionality
 -- ===================================================================

@@ -2,7 +2,6 @@
 -- MULTI_REPARTITION_UDT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 535000;
 -- START type creation
 CREATE TYPE test_udt AS (i integer, i2 integer);
 -- ... as well as a function to use as its comparator...

@@ -2,7 +2,6 @@
 -- MULTI_REPARTITIONED_SUBQUERY_UDF
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 830000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 830000;
 -- Create UDF in master and workers
 \c - - - :master_port
 DROP FUNCTION IF EXISTS median(double precision[]);

@@ -3,7 +3,6 @@
 --
 -- Tests that check the metadata returned by the master node.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1370000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1370000;
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1370000;
 ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000;

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 840000;
 -- ===================================================================
 -- test router planner functionality for single shard select queries
 -- ===================================================================
@@ -1873,8 +1872,7 @@ WARNING: relation "public.articles_append" does not exist
 CONTEXT: while executing command on localhost:57638
 WARNING: relation "public.articles_append" does not exist
 CONTEXT: while executing command on localhost:57638
-ERROR: failed to execute job 840026
+ERROR: failed to execute task 2
-DETAIL: Failure due to failed task 2
 -- same query with where false but evaluation left to worker
 SELECT author_id FROM articles_append
 WHERE
@@ -1889,8 +1887,7 @@ WARNING: relation "public.articles_append" does not exist
 CONTEXT: while executing command on localhost:57638
 WARNING: relation "public.articles_append" does not exist
 CONTEXT: while executing command on localhost:57638
-ERROR: failed to execute job 840027
+ERROR: failed to execute task 2
-DETAIL: Failure due to failed task 2
 -- same query on router planner with where false but evaluation left to worker
 SELECT author_id FROM articles_single_shard_hash
 WHERE
@@ -2026,6 +2023,26 @@ DEBUG: Plan is router executable
 41 | 1 | aznavour | 11814
 (5 rows)

+END;
+-- queries inside read-only transactions can be router plannable
+BEGIN;
+SET TRANSACTION READ ONLY;
+SELECT *
+FROM articles_hash
+WHERE author_id = 1
+ORDER BY id;
+DEBUG: predicate pruning for shardId 840001
+DEBUG: Creating router plan
+DEBUG: Plan is router executable
+id | author_id | title | word_count
+----+-----------+--------------+------------
+1 | 1 | arsenous | 9572
+11 | 1 | alamo | 1347
+21 | 1 | arcading | 5890
+31 | 1 | athwartships | 7271
+41 | 1 | aznavour | 11814
+(5 rows)
+
 END;
 -- cursor queries are router plannable
 BEGIN;

@@ -2,7 +2,6 @@
 -- MULTI_SCHEMA_SUPPORT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1190000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1190000;
 -- create schema to test schema support
 CREATE SCHEMA test_schema_support;
 -- test master_append_table_to_shard with schema

@@ -2,7 +2,6 @@
 -- MULTI_SHARD_MODIFY
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 350000;
 -- Create a new hash partitioned multi_shard_modify_test table and load data into it.
 CREATE TABLE multi_shard_modify_test (
 t_key integer not null,

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 850000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 850000;
 -- ===================================================================
 -- test end-to-end query functionality
 -- ===================================================================

@@ -3,7 +3,6 @@
 --
 -- This test checks that we are able to run selected set of distributed SQL subqueries.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 860000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 860000;
 SET citus.task_executor_type TO 'task-tracker';
 select
 number_sum,

@@ -2,7 +2,6 @@
 -- MULTI_SQL_FUNCTION
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1230000;
 CREATE FUNCTION sql_test_no_1() RETURNS bigint AS '
 SELECT
 count(*)

@@ -3,7 +3,6 @@
 --
 -- Tests around changing the schema and dropping of a distributed table
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 870000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 870000;
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
 SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
 master_create_distributed_table
@@ -2,7 +2,6 @@
 -- MULTI_TASK_ASSIGNMENT
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 880000;
 SET citus.explain_distributed_queries TO off;
 -- Check that our policies for assigning tasks to worker nodes run as expected.
 -- To test this, we first create a shell table, and then manually insert shard

@@ -114,39 +113,11 @@ DEBUG: CommitTransactionCommand
          explain statements for distributed queries are not enabled
 (3 rows)
 
--- Round-robin task assignment relies on the current jobId. We therefore need to
--- ensure that jobIds start with an odd number here; this way, task assignment
--- debug messages always produce the same output. Also, we make sure that the
--- following case statement always prints out "1" as the query's result.
-SELECT case when (currval('pg_dist_jobid_seq') % 2) = 0
-       then nextval('pg_dist_jobid_seq') % 2
-       else 1 end;
-DEBUG: StartTransactionCommand
-DEBUG: CommitTransactionCommand
- case
-------
-    1
-(1 row)
-
 -- Finally test the round-robin task assignment policy
 SET citus.task_assignment_policy TO 'round-robin';
 DEBUG: StartTransactionCommand
 DEBUG: ProcessUtility
 DEBUG: CommitTransactionCommand
-EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-DEBUG: StartTransactionCommand
-DEBUG: ProcessUtility
-DEBUG: assigned task 6 to node localhost:57637
-DEBUG: assigned task 4 to node localhost:57637
-DEBUG: assigned task 2 to node localhost:57638
-DEBUG: CommitTransactionCommand
-                              QUERY PLAN
------------------------------------------------------------------------
- Aggregate  (cost=0.00..0.00 rows=0 width=0)
-   ->  Custom Scan (Citus Real-Time)  (cost=0.00..0.00 rows=0 width=0)
-         explain statements for distributed queries are not enabled
-(3 rows)
-
 EXPLAIN SELECT count(*) FROM task_assignment_test_table;
 DEBUG: StartTransactionCommand
 DEBUG: ProcessUtility

@@ -175,6 +146,20 @@ DEBUG: CommitTransactionCommand
          explain statements for distributed queries are not enabled
 (3 rows)
 
+EXPLAIN SELECT count(*) FROM task_assignment_test_table;
+DEBUG: StartTransactionCommand
+DEBUG: ProcessUtility
+DEBUG: assigned task 6 to node localhost:57638
+DEBUG: assigned task 4 to node localhost:57638
+DEBUG: assigned task 2 to node localhost:57637
+DEBUG: CommitTransactionCommand
+                              QUERY PLAN
+-----------------------------------------------------------------------
+ Aggregate  (cost=0.00..0.00 rows=0 width=0)
+   ->  Custom Scan (Citus Real-Time)  (cost=0.00..0.00 rows=0 width=0)
+         explain statements for distributed queries are not enabled
+(3 rows)
+
 RESET citus.task_assignment_policy;
 DEBUG: StartTransactionCommand
 DEBUG: ProcessUtility

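The two hunks above are the behavioural half of the change: the removed block pinned the next job ID to an odd value through pg_dist_jobid_seq so that the round-robin debug output stayed stable, while the new expected output simply shows two consecutive EXPLAIN runs under the round-robin policy, with the same tasks rotating between the workers on ports 57637 and 57638. A rough way to observe that rotation interactively might look like the sketch below; the table name and ports come from the test itself, but the exact client_min_messages level needed to surface the "assigned task" messages is an assumption, since the test configures it outside the hunks shown here:

-- assumption: a DEBUG-level message setting high enough to show task assignments
SET client_min_messages TO DEBUG3;
SET citus.task_assignment_policy TO 'round-robin';
EXPLAIN SELECT count(*) FROM task_assignment_test_table;  -- e.g. tasks 6 and 4 on one worker, task 2 on the other
EXPLAIN SELECT count(*) FROM task_assignment_test_table;  -- the same tasks rotate to the other worker on the next run
RESET citus.task_assignment_policy;
RESET client_min_messages;
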
@@ -2,7 +2,6 @@
 -- MULTI_TPCH_QUERY1
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 890000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 890000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #1 from the TPC-H decision support benchmark

@@ -5,7 +5,6 @@
 -- we don't set citus.large_table_shard_count here, and instead use the default value
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 900000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 900000;
 SELECT
 	c_custkey,
 	c_name,

@@ -2,7 +2,6 @@
 -- MULTI_TPCH_QUERY12
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 910000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 910000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #12 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_TPCH_QUERY14
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 920000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 920000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #14 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_TPCH_QUERY19
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 930000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 930000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #19 from the TPC-H decision support benchmark. Note that we modified

@@ -5,7 +5,6 @@
 -- we don't set citus.large_table_shard_count here, and instead use the default value
 -- coming from postgresql.conf or multi_task_tracker_executor.conf.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 940000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 940000;
 SELECT
 	l_orderkey,
 	sum(l_extendedprice * (1 - l_discount)) as revenue,

@@ -2,7 +2,6 @@
 -- MULTI_TPCH_QUERY6
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 950000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 950000;
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #6 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_TPCH_QUERY7
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 970000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 970000;
 -- Change configuration to treat lineitem AND orders tables as large
 SET citus.large_table_shard_count TO 2;
 -- Query #7 from the TPC-H decision support benchmark

@@ -2,7 +2,6 @@
 -- MULTI_TPCH_QUERY7_NESTED
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 960000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 960000;
 -- Change configuration to treat lineitem AND orders tables AS large
 SET citus.large_table_shard_count TO 2;
 -- Query #7 from the TPC-H benchmark; modified to include sub-selects

@@ -1,5 +1,4 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1220000;
 -- Tests for prepared transaction recovery
 -- Ensure pg_dist_transaction is empty for test
 SELECT recover_prepared_transactions();

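Here, too, only the job ID restart disappears from the preamble; the recovery check itself is untouched. As a minimal sketch of what the surrounding test exercises (the expected starting count of zero is an assumption based on the "Ensure pg_dist_transaction is empty" comment above):

-- pg_dist_transaction should start out empty for the test
SELECT count(*) FROM pg_dist_transaction;
-- recover_prepared_transactions() reports how many prepared transactions it resolved
SELECT recover_prepared_transactions();
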
@@ -3,7 +3,6 @@
 --
 -- Tests that check the metadata returned by the master node.
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1410000;
 SET citus.shard_count TO 4;
 -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK
 CREATE TABLE transactional_drop_shards(column1 int);

@@ -2,7 +2,6 @@
 -- MULTI_TRUNCATE
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000;
-ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1210000;
 --
 -- truncate for append distribution
 -- expect all shards to be dropped

Some files were not shown because too many files have changed in this diff.