mirror of https://github.com/citusdata/citus.git
Fix typos. Spurred by spotting "connectios" in logs
parent 3bdbfc3edf
commit cc50682158
@@ -23,7 +23,7 @@ they are often moved to files that are named after the command.
 | `table.c` | |
 | `transmit.c` | Implementation of `COPY` commands with `format transmit` set in the options. This format is used to transfer files from one node to another node |
 | `truncate.c` | Implementation of `TRUNCATE` commands on distributed tables |
-| `utility_hook.c` | This is the entry point from postgres into the commands module of citus. It contains the implementation that gets registered in postgres' `ProcessUtility_hook` callback to extends the functionality of the original ProcessUtility. This code is used to route the incomming commands to their respective implementation in Citus |
+| `utility_hook.c` | This is the entry point from postgres into the commands module of citus. It contains the implementation that gets registered in postgres' `ProcessUtility_hook` callback to extends the functionality of the original ProcessUtility. This code is used to route the incoming commands to their respective implementation in Citus |
 | `vacuum.c` | Implementation of `VACUUM` commands on distributed tables |

 # COPY

@@ -455,7 +455,7 @@ AlterCollationSchemaStmtObjectAddress(Node *node, bool missing_ok)
 * EnsureSequentialModeForCollationDDL makes sure that the current transaction is already in
 * sequential mode, or can still safely be put in sequential mode, it errors if that is
 * not possible. The error contains information for the user to retry the transaction with
-* sequential mode set from the begining.
+* sequential mode set from the beginning.
 *
 * As collations are node scoped objects there exists only 1 instance of the collation used by
 * potentially multiple shards. To make sure all shards in the transaction can interact

@@ -260,7 +260,7 @@ DistributeFunctionColocatedWithDistributedTable(RegProcedure funcOid,
 {
 /*
 * cannot provide colocate_with without distribution_arg_name when the function
-* is not collocated with a reference table
+* is not colocated with a reference table
 */
 if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) != 0)
 {

@@ -1487,7 +1487,7 @@ PreprocessDropFunctionStmt(Node *node, const char *queryString,
 {
 /*
 * extensions should be created separately on the workers, types cascading from an
-* extension should therefor not be propagated here.
+* extension should therefore not be propagated here.
 */
 return NIL;
 }

@@ -1588,7 +1588,7 @@ PreprocessAlterFunctionDependsStmt(Node *node, const char *queryString,
 {
 /*
 * extensions should be created separately on the workers, types cascading from an
-* extension should therefor not be propagated here.
+* extension should therefore not be propagated here.
 */
 return NIL;
 }

@@ -630,7 +630,7 @@ GetRoleNameFromDbRoleSetting(HeapTuple tuple, TupleDesc DbRoleSettingDescription


 /*
-* MakeSetStatementArgs parses a configuraton value and creates an List of A_Const
+* MakeSetStatementArgs parses a configuration value and creates an List of A_Const
 * Nodes with appropriate types.
 *
 * The allowed A_Const types are Integer, Float, and String.

@@ -253,7 +253,7 @@ FilterDistributedSchemas(List *schemas)
 * EnsureSequentialModeForSchemaDDL makes sure that the current transaction is already in
 * sequential mode, or can still safely be put in sequential mode, it errors if that is
 * not possible. The error contains information for the user to retry the transaction with
-* sequential mode set from the begining.
+* sequential mode set from the beginning.
 *
 * Copy-pasted from type.c
 */

@@ -234,7 +234,7 @@ PreprocessDropSequenceStmt(Node *node, const char *queryString,
 {
 /*
 * extensions should be created separately on the workers, sequences cascading
-* from an extension should therefor not be propagated here.
+* from an extension should therefore not be propagated here.
 */
 return NIL;
 }

@@ -1276,7 +1276,7 @@ ConvertPostgresLocalTablesToCitusLocalTables(AlterTableStmt *alterTableStatement
 bool cascade = true;

 /*
-* Withoud this check, we would be erroring out in CreateCitusLocalTable
+* Without this check, we would be erroring out in CreateCitusLocalTable
 * for this case anyway. The purpose of this check&error is to provide
 * a more meaningful message for the user.
 */

@@ -424,7 +424,7 @@ AcquireDistributedLockOnRelations(List *relationIdList, LOCKMODE lockMode)
 const char *lockModeText = LockModeToLockModeText(lockMode);

 /*
-* We want to acquire locks in the same order accross the nodes.
+* We want to acquire locks in the same order across the nodes.
 * Although relation ids may change, their ordering will not.
 */
 relationIdList = SortList(relationIdList, CompareOids);

@@ -316,7 +316,7 @@ PostprocessCreateEnumStmt(Node *node, const char *queryString)
 *
 * Since it is an alter of an existing type we actually have the ObjectAddress. This is
 * used to check if the type is distributed, if so the alter will be executed on the
-* workers directly to keep the types in sync accross the cluster.
+* workers directly to keep the types in sync across the cluster.
 */
 List *
 PreprocessAlterEnumStmt(Node *node, const char *queryString,

@@ -1125,7 +1125,7 @@ MakeTypeNameFromRangeVar(const RangeVar *relation)
 * EnsureSequentialModeForTypeDDL makes sure that the current transaction is already in
 * sequential mode, or can still safely be put in sequential mode, it errors if that is
 * not possible. The error contains information for the user to retry the transaction with
-* sequential mode set from the begining.
+* sequential mode set from the beginning.
 *
 * As types are node scoped objects there exists only 1 instance of the type used by
 * potentially multiple shards. To make sure all shards in the transaction can interact

@@ -948,7 +948,7 @@ FinishConnectionListEstablishment(List *multiConnectionList)
 if (eventCount == 0)
 {
 /*
-* timeout has occured on waitset, double check the timeout since
+* timeout has occurred on waitset, double check the timeout since
 * connectionStart and if passed close all non-finished connections
 */

@@ -21,7 +21,7 @@
 * For COPY command, we use this fact to reserve connections to the remote nodes
 * in the same order as the adaptive executor in order to prevent any resource
 * starvations. We need to do this because COPY establishes connections when it
-* recieves a tuple that targets a remote node. This is a valuable optimization
+* receives a tuple that targets a remote node. This is a valuable optimization
 * to prevent unnecessary connection establishments, which are pretty expensive.
 * Instead, COPY command can reserve connections upfront, and utilize them when
 * they are actually needed.

@@ -191,7 +191,7 @@ InitializeLocallyReservedSharedConnections(void)
 uint32 hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE);

 SessionLocalReservedConnections =
-hash_create("citus session level reserved connectios (host,port,database,user)",
+hash_create("citus session level reserved connections (host,port,database,user)",
 64, &reservedConnectionInfo, hashFlags);
 }

@@ -5199,7 +5199,7 @@ RebuildWaitEventSetFlags(WaitEventSet *waitEventSet, List *sessionList)
 * ModifyWaitEvent may throw hard errors. For example, when the underlying
 * socket for a connection is closed by the remote server and already
 * reflected by the OS, however Citus hasn't had a chance to get this
-* information. In that case, if repliction factor is >1, Citus can
+* information. In that case, if replication factor is >1, Citus can
 * failover to other nodes for executing the query. Even if replication
 * factor = 1, Citus can give much nicer errors.
 *

@@ -649,7 +649,7 @@ ExecuteLocalTaskPlan(PlannedStmt *taskPlan, char *queryString,
 int localPlacementIndex = 0;

 /*
-* Use the tupleStore provided by the scanState because it is shared accross
+* Use the tupleStore provided by the scanState because it is shared across
 * the other task executions and the adaptive executor.
 *
 * Also note that as long as the tupleDest is provided, local execution always

@@ -322,7 +322,7 @@ ReturnTupleFromTuplestore(CitusScanState *scanState)
 {
 /*
 * If there is a very selective qual on the Citus Scan node we might block
-* interupts for a longer time if we would not check for interrupts in this loop
+* interrupts for a longer time if we would not check for interrupts in this loop
 */
 CHECK_FOR_INTERRUPTS();

@@ -788,7 +788,7 @@ FollowNewSupportedDependencies(ObjectAddressCollector *collector,

 /*
 * If the object is already distributed it is not a `new` object that needs to be
-* distributed before we create a dependant object
+* distributed before we create a dependent object
 */
 if (IsObjectDistributed(&address))
 {

@@ -239,7 +239,7 @@ StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort)
 * EnsureSequentialModeMetadataOperations makes sure that the current transaction is
 * already in sequential mode, or can still safely be put in sequential mode,
 * it errors if that is not possible. The error contains information for the user to
-* retry the transaction with sequential mode set from the begining.
+* retry the transaction with sequential mode set from the beginning.
 *
 * Metadata objects (e.g., distributed table on the workers) exists only 1 instance of
 * the type used by potentially multiple other shards/connections. To make sure all

@@ -234,7 +234,7 @@ DropShards(Oid relationId, char *schemaName, char *relationName,
 UseCoordinatedTransaction();

 /*
-* We will use below variable accross this function to decide if we can
+* We will use below variable across this function to decide if we can
 * use local execution
 */
 int32 localGroupId = GetLocalGroupId();

@@ -614,7 +614,7 @@ GetColocatedRebalanceSteps(List *placementUpdateList)
 /*
 * AcquireColocationLock tries to acquire a lock for rebalance/replication. If
 * this is it not possible it fails instantly because this means another
-* rebalance/repliction is currently happening. This would really mess up
+* rebalance/replication is currently happening. This would really mess up
 * planning.
 */
 static void

@@ -1957,7 +1957,7 @@ ReorderTaskPlacementsByTaskAssignmentPolicy(Job *job,
 * because the user is trying to distributed the load across nodes via
 * round-robin policy. Otherwise, the local execution would prioritize
 * executing the local tasks and especially for reference tables on the
-* coordinator this would prevent load balancing accross nodes.
+* coordinator this would prevent load balancing across nodes.
 *
 * For other worker nodes in Citus MX, we let the local execution to kick-in
 * even for round-robin policy, that's because we expect the clients to evenly

@@ -1413,7 +1413,7 @@ RegisterCitusConfigVariables(void)

 DefineCustomBoolVariable(
 "citus.override_table_visibility",
-gettext_noop("Enables replacing occurencens of pg_catalog.pg_table_visible() "
+gettext_noop("Enables replacing occurrrences of pg_catalog.pg_table_visible() "
 "with pg_catalog.citus_table_visible()"),
 gettext_noop("When enabled, shards on the Citus MX worker (data) nodes would be "
 "filtered out by many psql commands to provide better user "

@@ -11,7 +11,7 @@ DECLARE
 schema_name_text name;
 table_name_text name;

--- record for to-be-created parttion
+-- record for to-be-created partition
 missing_partition_record record;

 -- result indiciates whether any partitions were created

@@ -11,7 +11,7 @@ DECLARE
 schema_name_text name;
 table_name_text name;

--- record for to-be-created parttion
+-- record for to-be-created partition
 missing_partition_record record;

 -- result indiciates whether any partitions were created

@@ -3,7 +3,7 @@
 * citus_dist_stat_activity.c
 *
 * This file contains functions for monitoring the distributed transactions
-* accross the cluster.
+* across the cluster.
 *
 * Copyright (c) Citus Data, Inc.
 *

@@ -673,7 +673,7 @@ GetRelationAccessMode(Oid relationId, ShardPlacementAccessType accessType)
 * of the relation accesses.
 *
 * In many cases, we'd only need IsMultiStatementTransaction(), however, for some
-* cases such as CTEs, where Citus uses the same connections accross multiple queries,
+* cases such as CTEs, where Citus uses the same connections across multiple queries,
 * we should still record the relation accesses even not inside an explicit transaction
 * block. Thus, keeping track of the relation accesses inside coordinated transactions
 * is also required.

@@ -83,11 +83,11 @@ SafeStringToInt64(const char *str)
 }
 else if ((errno == ERANGE && number == LLONG_MIN) || number < INT64_MIN)
 {
-ereport(ERROR, (errmsg("Error parsing %s as int64, underflow occured\n", str)));
+ereport(ERROR, (errmsg("Error parsing %s as int64, underflow occurred\n", str)));
 }
 else if ((errno == ERANGE && number == LLONG_MAX) || number > INT64_MAX)
 {
-ereport(ERROR, (errmsg("Error parsing %s as int64, overflow occured\n", str)));
+ereport(ERROR, (errmsg("Error parsing %s as int64, overflow occurred\n", str)));
 }
 else if (errno == EINVAL)
 {

@@ -130,7 +130,7 @@ SafeStringToUint64(const char *str)
 }
 else if ((errno == ERANGE && number == ULLONG_MAX) || number > UINT64_MAX)
 {
-ereport(ERROR, (errmsg("Error parsing %s as uint64, overflow occured\n", str)));
+ereport(ERROR, (errmsg("Error parsing %s as uint64, overflow occurred\n", str)));
 }
 else if (errno == EINVAL)
 {

@@ -196,7 +196,7 @@ GloballyReloadConfig()
 /*
 * ShouldUseAutoSSL checks if citus should enable ssl based on the connection settings it
 * uses for outward connections. When the outward connection is configured to require ssl
-* it assumes the other nodes in the network have the same setting and therefor it will
+* it assumes the other nodes in the network have the same setting and therefore it will
 * automatically enable ssl during installation.
 */
 static bool

@@ -63,7 +63,7 @@ alter_role_if_exists(PG_FUNCTION_ARGS)
 * If the role does exist it will run the query provided in alter_role_utility_query to
 * change the existing user in such a way that it is compatible with the user on the
 * coordinator. This query is expected to be a AlterRoleStmt, if a different statement is
-* provdided the function will raise an error.
+* provided the function will raise an error.
 *
 * For both queries a NULL value can be passed to omit the execution of that condition.
 *

@@ -275,7 +275,7 @@ ReplaceTableVisibleFunction(Node *inputNode)


 /*
-* ReplaceTableVisibleFunction replaces all occurences of
+* ReplaceTableVisibleFunction replaces all occurrences of
 * pg_catalog.pg_table_visible() to
 * pg_catalog.citus_table_visible() in the given input node.
 *

@@ -17,7 +17,7 @@
 #include "distributed/multi_physical_planner.h"
 #include "distributed/worker_manager.h"

-/* Adaptive executor repartioning related defines */
+/* Adaptive executor repartitioning related defines */
 #define WORKER_CREATE_SCHEMA_QUERY "SELECT worker_create_schema (" UINT64_FORMAT ", %s);"
 #define WORKER_REPARTITION_CLEANUP_QUERY "SELECT worker_repartition_cleanup (" \
 UINT64_FORMAT \

@@ -94,7 +94,7 @@ INSERT INTO order_line SELECT c, c, c, c, c, NULL, c, c, c, 'abc' FROM generate_
 INSERT INTO stock SELECT c, c, c, c, c, c, 'abc', c, c, c, c, c, c, c, c, c, c FROM generate_series(1, 5) as c;
 INSERT INTO item SELECT c, 'abc', c, 'abc', c FROM generate_series(1, 3) as c;
 INSERT INTO item SELECT 10+c, 'abc', c, 'abc', c FROM generate_series(1, 3) as c;
--- Subquery + repartion is supported when it is an IN query where the subquery
+-- Subquery + repartition is supported when it is an IN query where the subquery
 -- returns unique results (because it's converted to an INNER JOIN)
 select s_i_id
 from stock, order_line

@@ -109,7 +109,7 @@ select s_i_id
 3
 (3 rows)

--- Subquery + repartion is not supported when it is an IN query where the
+-- Subquery + repartition is not supported when it is an IN query where the
 -- subquery doesn't return unique results
 select s_i_id
 from stock, order_line

@@ -117,7 +117,7 @@ select s_i_id
 s_i_id in (select i_im_id from item)
 AND s_i_id = ol_i_id;
 ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
--- Subquery + repartion is supported when it is a NOT IN query where the subquery
+-- Subquery + repartition is supported when it is a NOT IN query where the subquery
 -- returns unique results
 select s_i_id
 from stock, order_line

@@ -125,7 +125,7 @@ select s_i_id
 s_i_id not in (select i_id from item)
 AND s_i_id = ol_i_id;
 ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
--- Subquery + repartion is not supported when it is a NOT IN where the subquery
+-- Subquery + repartition is not supported when it is a NOT IN where the subquery
 -- doesn't return unique results
 select s_i_id
 from stock, order_line

@@ -144,7 +144,7 @@ select s_i_id
 ---------------------------------------------------------------------
 (0 rows)

--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where both subqueries return unique results
 select s_i_id
 from stock, order_line

@@ -153,7 +153,7 @@ select s_i_id
 AND s_i_id not in (select i_id from item)
 AND s_i_id = ol_i_id;
 ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where the IN subquery returns unique results and the NOT IN returns non unique results
 select s_i_id
 from stock, order_line

@@ -162,7 +162,7 @@ select s_i_id
 AND s_i_id not in (select i_im_id from item)
 AND s_i_id = ol_i_id;
 ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where the IN subquery returns non unique results and the NOT IN returns unique results
 select s_i_id
 from stock, order_line

@@ -171,7 +171,7 @@ select s_i_id
 AND s_i_id not in (select i_id from item)
 AND s_i_id = ol_i_id;
 ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns
--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where both subqueries return non unique results
 select s_i_id
 from stock, order_line

@@ -1,5 +1,5 @@
 SET search_path to "ch benchmarks";
--- Subquery + repartion is supported when it is an IN query where the subquery
+-- Subquery + repartition is supported when it is an IN query where the subquery
 -- returns unique results (because it's converted to an INNER JOIN)
 select s_i_id
 from stock, order_line

@@ -282,7 +282,7 @@ LIMIT 1;
 7 | 1 | 7 | 1
 (1 row)

--- Check repartion joins are supported
+-- Check repartition joins are supported
 SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x, t2.x, t1.y, t2.y;
 x | y | x | y
 ---------------------------------------------------------------------

@@ -228,7 +228,7 @@ ORDER BY foo_inner_1.tenant_id;

 RESET citus.enable_repartition_joins;
 -- there is a lateral join (e.g., correlated subquery) thus the subqueries cannot be
--- recursively planned, this one can not be planned by the repartion planner
+-- recursively planned, this one can not be planned by the repartition planner
 -- because of the IN query on a non unique column
 UPDATE
 second_distributed_table

@@ -37,7 +37,7 @@ CREATE TABLE local(c int, d int);
 INSERT INTO test VALUES (1, 2), (3, 4), (5, 6), (2, 7), (4, 5);
 INSERT INTO ref VALUES (1, 2), (5, 6), (7, 8);
 INSERT INTO local VALUES (1, 2), (3, 4), (7, 8);
--- Check repartion joins are supported
+-- Check repartition joins are supported
 SET citus.enable_repartition_joins TO ON;
 SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
 x | y | x | y

@@ -72,7 +72,7 @@ create table events (
 );
 create index event_time_idx on events using BRIN (event_time);
 create index event_json_idx on events using gin(payload);
-select create_distributed_table('events', 'user_id'); -- on purpose don't collocate on correctly on org_id
+select create_distributed_table('events', 'user_id'); -- on purpose don't colocate on correctly on org_id
 create_distributed_table
 ---------------------------------------------------------------------

@@ -306,8 +306,8 @@ Aggregate (actual rows=1 loops=1)
 Merge Task Count: 4
 -- Confirm repartiton join in distributed subplan works
 EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
-WITH repartion AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
-SELECT count(*) from repartion;
+WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
+SELECT count(*) from repartition;
 Custom Scan (Citus Adaptive) (actual rows=1 loops=1)
 -> Distributed Subplan XXX_1
 Intermediate Data Size: 14 bytes

@@ -66,7 +66,7 @@ CREATE INDEX lineitem_partkey_desc_index ON public.lineitem (l_partkey DESC);
 CREATE INDEX lineitem_partial_index ON public.lineitem (l_shipdate)
 WHERE l_shipdate < '1995-01-01';
 CREATE INDEX lineitem_colref_index ON public.lineitem (record_ne(lineitem.*, NULL));
-SET client_min_messages = ERROR; -- avoid version dependant warning about WAL
+SET client_min_messages = ERROR; -- avoid version dependent warning about WAL
 CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey);
 CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range(a);
 CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range(a,b);

@@ -933,7 +933,7 @@ SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1;
 25 | 10-10-2010 |
 (2 rows)

--- perform operations on partition and partioned tables together
+-- perform operations on partition and partitioned tables together
 INSERT INTO partitioning_test VALUES(26, '2010-02-02', 26);
 INSERT INTO partitioning_test_2010 VALUES(26, '2010-02-02', 26);
 COPY partitioning_test FROM STDIN WITH CSV;

@@ -875,7 +875,7 @@ SET citus.subquery_pushdown to ON;
 NOTICE: Setting citus.subquery_pushdown flag is discouraged becuase it forces the planner to pushdown certain queries, skipping relevant correctness checks.
 DETAIL: When enabled, the planner skips many correctness checks for subqueries and pushes down the queries to shards as-is. It means that the queries are likely to return wrong results unless the user is absolutely sure that pushing down the subquery is safe. This GUC is maintained only for backward compatibility, no new users are supposed to use it. The planner is capable of pushing down as much computation as possible to the shards depending on the query.
 -- multi-subquery-join
--- The first query has filters on partion column to make it router plannable
+-- The first query has filters on partition column to make it router plannable
 -- but it is processed by logical planner since we disabled router execution
 SELECT
 e1.user_id,

@@ -1,6 +1,6 @@
 -- ===================================================================
 -- test recursive planning functionality for non-colocated subqueries
--- We prefered to use EXPLAIN almost all the queries here,
+-- We preferred to use EXPLAIN almost all the queries here,
 -- otherwise the execution time of so many repartition queries would
 -- be too high for the regression tests. Also, note that we're mostly
 -- interested in recurive planning side of the things, thus supressing

@@ -1,6 +1,6 @@
 -- ===================================================================
 -- test recursive planning functionality for non-colocated subqueries
--- We prefered to use EXPLAIN almost all the queries here,
+-- We preferred to use EXPLAIN almost all the queries here,
 -- otherwise the execution time of so many repartition queries would
 -- be too high for the regression tests. Also, note that we're mostly
 -- interested in recurive planning side of the things, thus supressing

@@ -953,7 +953,7 @@ SELECT * FROM ref, local WHERE a = c ORDER BY a;
 7 | 8 | 7 | 8
 (2 rows)

--- Check repartion joins are supported
+-- Check repartition joins are supported
 SET citus.enable_repartition_joins TO ON;
 SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
 x | y | x | y

@@ -92,7 +92,7 @@ create table events (
 );
 create index event_time_idx on events using BRIN (event_time);
 create index event_json_idx on events using gin(payload);
-select create_distributed_table('events', 'user_id'); -- on purpose don't collocate on correctly on org_id
+select create_distributed_table('events', 'user_id'); -- on purpose don't colocate on correctly on org_id
 create_distributed_table
 ---------------------------------------------------------------------

@@ -579,7 +579,7 @@ DEBUG: CTE cte_1 is going to be inlined via distributed planning
 3
 (1 row)

--- prepared statements with volatile functtions should be still pushed down
+-- prepared statements with volatile functions should be still pushed down
 -- because the function is evaluated on the coordinator
 CREATE OR REPLACE FUNCTION fixed_volatile_value() RETURNS integer VOLATILE AS $$
 BEGIN

@@ -78,7 +78,7 @@ INSERT INTO item SELECT c, 'abc', c, 'abc', c FROM generate_series(1, 3) as c;
 INSERT INTO item SELECT 10+c, 'abc', c, 'abc', c FROM generate_series(1, 3) as c;


--- Subquery + repartion is supported when it is an IN query where the subquery
+-- Subquery + repartition is supported when it is an IN query where the subquery
 -- returns unique results (because it's converted to an INNER JOIN)
 select s_i_id
 from stock, order_line

@@ -88,7 +88,7 @@ select s_i_id
 order by s_i_id;


--- Subquery + repartion is not supported when it is an IN query where the
+-- Subquery + repartition is not supported when it is an IN query where the
 -- subquery doesn't return unique results
 select s_i_id
 from stock, order_line

@@ -96,7 +96,7 @@ select s_i_id
 s_i_id in (select i_im_id from item)
 AND s_i_id = ol_i_id;

--- Subquery + repartion is supported when it is a NOT IN query where the subquery
+-- Subquery + repartition is supported when it is a NOT IN query where the subquery
 -- returns unique results
 select s_i_id
 from stock, order_line

@@ -104,7 +104,7 @@ select s_i_id
 s_i_id not in (select i_id from item)
 AND s_i_id = ol_i_id;

--- Subquery + repartion is not supported when it is a NOT IN where the subquery
+-- Subquery + repartition is not supported when it is a NOT IN where the subquery
 -- doesn't return unique results
 select s_i_id
 from stock, order_line

@@ -120,7 +120,7 @@ select s_i_id
 s_i_id in (select i_id from item)
 AND s_i_id not in (select i_im_id from item);

--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where both subqueries return unique results
 select s_i_id
 from stock, order_line

@@ -129,7 +129,7 @@ select s_i_id
 AND s_i_id not in (select i_id from item)
 AND s_i_id = ol_i_id;

--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where the IN subquery returns unique results and the NOT IN returns non unique results
 select s_i_id
 from stock, order_line

@@ -139,7 +139,7 @@ select s_i_id
 AND s_i_id = ol_i_id;


--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where the IN subquery returns non unique results and the NOT IN returns unique results
 select s_i_id
 from stock, order_line

@@ -148,7 +148,7 @@ select s_i_id
 AND s_i_id not in (select i_id from item)
 AND s_i_id = ol_i_id;

--- Subquery + repartion is not supported when it contains both an IN and a NOT IN
+-- Subquery + repartition is not supported when it contains both an IN and a NOT IN
 -- where both subqueries return non unique results
 select s_i_id
 from stock, order_line

@@ -1,6 +1,6 @@
 SET search_path to "ch benchmarks";

--- Subquery + repartion is supported when it is an IN query where the subquery
+-- Subquery + repartition is supported when it is an IN query where the subquery
 -- returns unique results (because it's converted to an INNER JOIN)
 select s_i_id
 from stock, order_line

@@ -141,7 +141,7 @@ WHERE
 ORDER BY 1 DESC, 2 DESC, 3 DESC, 4 DESC
 LIMIT 1;

--- Check repartion joins are supported
+-- Check repartition joins are supported
 SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x, t2.x, t1.y, t2.y;

@@ -174,7 +174,7 @@ RESET citus.enable_repartition_joins;


 -- there is a lateral join (e.g., correlated subquery) thus the subqueries cannot be
--- recursively planned, this one can not be planned by the repartion planner
+-- recursively planned, this one can not be planned by the repartition planner
 -- because of the IN query on a non unique column
 UPDATE
 second_distributed_table

@@ -22,7 +22,7 @@ INSERT INTO test VALUES (1, 2), (3, 4), (5, 6), (2, 7), (4, 5);
 INSERT INTO ref VALUES (1, 2), (5, 6), (7, 8);
 INSERT INTO local VALUES (1, 2), (3, 4), (7, 8);

--- Check repartion joins are supported
+-- Check repartition joins are supported
 SET citus.enable_repartition_joins TO ON;
 SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
 SET citus.enable_single_hash_repartition_joins TO ON;

@@ -54,7 +54,7 @@ create table events (
 );
 create index event_time_idx on events using BRIN (event_time);
 create index event_json_idx on events using gin(payload);
-select create_distributed_table('events', 'user_id'); -- on purpose don't collocate on correctly on org_id
+select create_distributed_table('events', 'user_id'); -- on purpose don't colocate on correctly on org_id

 create table local_data(
 id bigserial primary key

@@ -98,8 +98,8 @@ SET LOCAL citus.enable_repartition_joins TO true;
 EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off) SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b;
 -- Confirm repartiton join in distributed subplan works
 EXPLAIN (COSTS off, ANALYZE on, TIMING off, SUMMARY off)
-WITH repartion AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
-SELECT count(*) from repartion;
+WITH repartition AS (SELECT count(*) FROM t1, t2 WHERE t1.a=t2.b)
+SELECT count(*) from repartition;
 END;
 DROP TABLE t1, t2;

@@ -45,7 +45,7 @@ CREATE INDEX lineitem_partial_index ON public.lineitem (l_shipdate)

 CREATE INDEX lineitem_colref_index ON public.lineitem (record_ne(lineitem.*, NULL));

-SET client_min_messages = ERROR; -- avoid version dependant warning about WAL
+SET client_min_messages = ERROR; -- avoid version dependent warning about WAL
 CREATE INDEX lineitem_orderkey_hash_index ON public.lineitem USING hash (l_partkey);
 CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range(a);
 CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range(a,b);

@@ -565,7 +565,7 @@ UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25;
 -- see the data is updated
 SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1;

--- perform operations on partition and partioned tables together
+-- perform operations on partition and partitioned tables together
 INSERT INTO partitioning_test VALUES(26, '2010-02-02', 26);
 INSERT INTO partitioning_test_2010 VALUES(26, '2010-02-02', 26);
 COPY partitioning_test FROM STDIN WITH CSV;

@@ -725,7 +725,7 @@ GROUP BY
 SET citus.subquery_pushdown to ON;

 -- multi-subquery-join
--- The first query has filters on partion column to make it router plannable
+-- The first query has filters on partition column to make it router plannable
 -- but it is processed by logical planner since we disabled router execution
 SELECT
 e1.user_id,

@@ -1,6 +1,6 @@
 -- ===================================================================
 -- test recursive planning functionality for non-colocated subqueries
--- We prefered to use EXPLAIN almost all the queries here,
+-- We preferred to use EXPLAIN almost all the queries here,
 -- otherwise the execution time of so many repartition queries would
 -- be too high for the regression tests. Also, note that we're mostly
 -- interested in recurive planning side of the things, thus supressing

@@ -1,6 +1,6 @@
 -- ===================================================================
 -- test recursive planning functionality for non-colocated subqueries
--- We prefered to use EXPLAIN almost all the queries here,
+-- We preferred to use EXPLAIN almost all the queries here,
 -- otherwise the execution time of so many repartition queries would
 -- be too high for the regression tests. Also, note that we're mostly
 -- interested in recurive planning side of the things, thus supressing

@@ -529,7 +529,7 @@ SELECT count(*) FROM local;
 SELECT * FROM local ORDER BY c;
 SELECT * FROM ref, local WHERE a = c ORDER BY a;

--- Check repartion joins are supported
+-- Check repartition joins are supported
 SET citus.enable_repartition_joins TO ON;
 SELECT * FROM test t1, test t2 WHERE t1.x = t2.y ORDER BY t1.x;
 SET citus.enable_single_hash_repartition_joins TO ON;

@@ -77,7 +77,7 @@ create table events (
 );
 create index event_time_idx on events using BRIN (event_time);
 create index event_json_idx on events using gin(payload);
-select create_distributed_table('events', 'user_id'); -- on purpose don't collocate on correctly on org_id
+select create_distributed_table('events', 'user_id'); -- on purpose don't colocate on correctly on org_id

 create table local_data(
 id bigserial primary key

@@ -373,7 +373,7 @@ EXECUTE test_values_pushdown(1,2,3);
 EXECUTE test_values_pushdown(1,2,3);
 EXECUTE test_values_pushdown(1,2,3);

--- prepared statements with volatile functtions should be still pushed down
+-- prepared statements with volatile functions should be still pushed down
 -- because the function is evaluated on the coordinator
 CREATE OR REPLACE FUNCTION fixed_volatile_value() RETURNS integer VOLATILE AS $$
 BEGIN