Mirror of https://github.com/citusdata/citus.git
Merge 2c73c7695a into 73c41210c5
commit e7048cdc0b
@@ -100,7 +100,7 @@ SET search_path = 'pg_catalog';
 
 /* master_* functions */
 
-CREATE FUNCTION master_get_table_metadata(relation_name text, OUT logical_relid oid,
+CREATE FUNCTION get_table_metadata(relation_name text, OUT logical_relid oid,
                                           OUT part_storage_type "char",
                                           OUT part_method "char", OUT part_key text,
                                           OUT part_replica_count integer,
@@ -108,91 +108,91 @@ CREATE FUNCTION master_get_table_metadata(relation_name text, OUT logical_relid
                                           OUT part_placement_policy integer)
     RETURNS record
     LANGUAGE C STABLE STRICT
-    AS 'MODULE_PATHNAME', $$master_get_table_metadata$$;
-COMMENT ON FUNCTION master_get_table_metadata(relation_name text)
+    AS 'MODULE_PATHNAME', $$get_table_metadata$$;
+COMMENT ON FUNCTION get_table_metadata(relation_name text)
     IS 'fetch metadata values for the table';
 
-CREATE FUNCTION master_get_table_ddl_events(text)
+CREATE FUNCTION get_table_ddl_events(text)
     RETURNS SETOF text
     LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_table_ddl_events$$;
-COMMENT ON FUNCTION master_get_table_ddl_events(text)
+    AS 'MODULE_PATHNAME', $$get_table_ddl_events$$;
+COMMENT ON FUNCTION get_table_ddl_events(text)
     IS 'fetch set of ddl statements for the table';
 
-CREATE FUNCTION master_get_new_shardid()
+CREATE FUNCTION get_new_shardid()
     RETURNS bigint
     LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_get_new_shardid$$;
-COMMENT ON FUNCTION master_get_new_shardid()
+    AS 'MODULE_PATHNAME', $$get_new_shardid$$;
+COMMENT ON FUNCTION get_new_shardid()
     IS 'fetch unique shardId';
 
-CREATE FUNCTION master_get_local_first_candidate_nodes(OUT node_name text,
+CREATE FUNCTION get_local_first_candidate_nodes(OUT node_name text,
                                                        OUT node_port bigint)
     RETURNS SETOF record
     LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_local_first_candidate_nodes$$;
-COMMENT ON FUNCTION master_get_local_first_candidate_nodes()
+    AS 'MODULE_PATHNAME', $$get_local_first_candidate_nodes$$;
+COMMENT ON FUNCTION get_local_first_candidate_nodes()
     IS 'fetch set of candidate nodes for shard uploading choosing the local node first';
 
-CREATE FUNCTION master_create_empty_shard(text)
+CREATE FUNCTION create_empty_shard(text)
     RETURNS bigint
     LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_create_empty_shard$$;
-COMMENT ON FUNCTION master_create_empty_shard(text)
+    AS 'MODULE_PATHNAME', $$create_empty_shard$$;
+COMMENT ON FUNCTION create_empty_shard(text)
     IS 'create an empty shard and shard placements for the table';
 
-CREATE FUNCTION master_append_table_to_shard(bigint, text, text, integer)
+CREATE FUNCTION append_table_to_shard(bigint, text, text, integer)
    RETURNS real
    LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_append_table_to_shard$$;
-COMMENT ON FUNCTION master_append_table_to_shard(bigint, text, text, integer)
+    AS 'MODULE_PATHNAME', $$append_table_to_shard$$;
+COMMENT ON FUNCTION append_table_to_shard(bigint, text, text, integer)
    IS 'append given table to all shard placements and update metadata';
 
-CREATE FUNCTION master_drop_all_shards(logicalrelid regclass,
+CREATE FUNCTION drop_all_shards(logicalrelid regclass,
                                        schema_name text,
                                        table_name text)
    RETURNS integer
    LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_drop_all_shards$$;
-COMMENT ON FUNCTION master_drop_all_shards(regclass, text, text)
+    AS 'MODULE_PATHNAME', $$drop_all_shards$$;
+COMMENT ON FUNCTION drop_all_shards(regclass, text, text)
    IS 'drop all shards in a relation and update metadata';
 
-CREATE FUNCTION master_apply_delete_command(text)
+CREATE FUNCTION apply_delete_command(text)
    RETURNS integer
    LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_apply_delete_command$$;
-COMMENT ON FUNCTION master_apply_delete_command(text)
+    AS 'MODULE_PATHNAME', $$apply_delete_command$$;
+COMMENT ON FUNCTION apply_delete_command(text)
    IS 'drop shards matching delete criteria and update metadata';
 
-CREATE FUNCTION master_get_active_worker_nodes(OUT node_name text, OUT node_port bigint)
+CREATE FUNCTION get_active_worker_nodes(OUT node_name text, OUT node_port bigint)
    RETURNS SETOF record
    LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_active_worker_nodes$$;
-COMMENT ON FUNCTION master_get_active_worker_nodes()
+    AS 'MODULE_PATHNAME', $$get_active_worker_nodes$$;
+COMMENT ON FUNCTION get_active_worker_nodes()
    IS 'fetch set of active worker nodes';
 
-CREATE FUNCTION master_get_round_robin_candidate_nodes(shard_id bigint,
+CREATE FUNCTION get_round_robin_candidate_nodes(shard_id bigint,
                                                        OUT node_name text,
                                                        OUT node_port bigint)
    RETURNS SETOF record
    LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_round_robin_candidate_nodes$$;
-COMMENT ON FUNCTION master_get_round_robin_candidate_nodes(shard_id bigint)
+    AS 'MODULE_PATHNAME', $$get_round_robin_candidate_nodes$$;
+COMMENT ON FUNCTION get_round_robin_candidate_nodes(shard_id bigint)
    IS 'fetch set of candidate nodes for shard uploading in round-robin manner';
 
-CREATE FUNCTION master_create_distributed_table(table_name regclass,
+CREATE FUNCTION create_distributed_table(table_name regclass,
                                                 distribution_column text,
                                                 distribution_method citus.distribution_type)
    RETURNS void
    LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_create_distributed_table$$;
-COMMENT ON FUNCTION master_create_distributed_table(table_name regclass,
+    AS 'MODULE_PATHNAME', $$create_distributed_table$$;
+COMMENT ON FUNCTION create_distributed_table(table_name regclass,
                                                     distribution_column text,
                                                     distribution_method citus.distribution_type)
    IS 'define the table distribution functions';
 
 -- define shard creation function for hash-partitioned tables
-CREATE FUNCTION master_create_worker_shards(table_name text, shard_count integer,
+CREATE FUNCTION create_worker_shards(table_name text, shard_count integer,
                                             replication_factor integer DEFAULT 2)
    RETURNS void
    AS 'MODULE_PATHNAME'
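Note: the rename above is purely mechanical; argument lists and return types
are unchanged. As a usage sketch (the table name, column, and counts below are
illustrative, not part of the patch), a session against the renamed API looks
exactly like before, minus the prefix:

    SELECT create_distributed_table('github_events', 'repo_id', 'hash');
    SELECT create_worker_shards('github_events', 16, 2);
    SELECT * FROM get_table_metadata('github_events');
    SELECT get_new_shardid();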
@@ -341,7 +341,7 @@ BEGIN
     END IF;
 
     -- ensure all shards are dropped
-    PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
+    PERFORM drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
 
     -- delete partition entry
     DELETE FROM pg_dist_partition WHERE logicalrelid = v_obj.objid;
@@ -352,18 +352,18 @@ $cdbdt$;
 COMMENT ON FUNCTION citus_drop_trigger()
     IS 'perform checks and actions at the end of DROP actions';
 
-CREATE FUNCTION master_dist_partition_cache_invalidate()
+CREATE FUNCTION dist_partition_cache_invalidate()
     RETURNS trigger
     LANGUAGE C
-    AS 'MODULE_PATHNAME', $$master_dist_partition_cache_invalidate$$;
-COMMENT ON FUNCTION master_dist_partition_cache_invalidate()
+    AS 'MODULE_PATHNAME', $$dist_partition_cache_invalidate$$;
+COMMENT ON FUNCTION dist_partition_cache_invalidate()
     IS 'register relcache invalidation for changed rows';
 
-CREATE FUNCTION master_dist_shard_cache_invalidate()
+CREATE FUNCTION dist_shard_cache_invalidate()
     RETURNS trigger
     LANGUAGE C
-    AS 'MODULE_PATHNAME', $$master_dist_shard_cache_invalidate$$;
-COMMENT ON FUNCTION master_dist_shard_cache_invalidate()
+    AS 'MODULE_PATHNAME', $$dist_shard_cache_invalidate$$;
+COMMENT ON FUNCTION dist_shard_cache_invalidate()
     IS 'register relcache invalidation for changed rows';
 
 
@@ -388,12 +388,12 @@ CREATE EVENT TRIGGER citus_cascade_to_partition
 CREATE TRIGGER dist_partition_cache_invalidate
     AFTER INSERT OR UPDATE OR DELETE
     ON pg_catalog.pg_dist_partition
-    FOR EACH ROW EXECUTE PROCEDURE master_dist_partition_cache_invalidate();
+    FOR EACH ROW EXECUTE PROCEDURE dist_partition_cache_invalidate();
 
 CREATE TRIGGER dist_shard_cache_invalidate
     AFTER INSERT OR UPDATE OR DELETE
     ON pg_catalog.pg_dist_shard
-    FOR EACH ROW EXECUTE PROCEDURE master_dist_shard_cache_invalidate();
+    FOR EACH ROW EXECUTE PROCEDURE dist_shard_cache_invalidate();
 
 
 /*****************************************************************************
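Note: with these triggers in place, any INSERT, UPDATE, or DELETE on
pg_dist_partition or pg_dist_shard fires the renamed trigger functions, which
register relcache invalidations. A minimal sketch, assuming a table named
'events' that is already distributed; the metadata delete below (the same
statement citus_drop_trigger runs) invalidates the cached entry for it:

    DELETE FROM pg_dist_partition WHERE logicalrelid = 'events'::regclass;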
@@ -485,7 +485,7 @@ COMMENT ON FUNCTION create_insert_proxy_for_table(regclass, regclass)
     IS 'create a proxy table that redirects INSERTed rows to a target table';
 
 -- define shard repair function
-CREATE FUNCTION master_copy_shard_placement(shard_id bigint,
+CREATE FUNCTION copy_shard_placement(shard_id bigint,
                                             source_node_name text,
                                             source_node_port integer,
                                             target_node_name text,
@@ -51,11 +51,11 @@ static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId,
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_create_distributed_table);
+PG_FUNCTION_INFO_V1(create_distributed_table);
 
 
 /*
- * master_create_distributed_table accepts a table, distribution column and
+ * create_distributed_table accepts a table, distribution column and
  * method and performs the corresponding catalog changes.
  *
  * XXX: We should perform more checks here to see if this table is fit for
@@ -66,7 +66,7 @@ PG_FUNCTION_INFO_V1(master_create_distributed_table);
  * preexisting content.
  */
 Datum
-master_create_distributed_table(PG_FUNCTION_ARGS)
+create_distributed_table(PG_FUNCTION_ARGS)
 {
     Oid distributedRelationId = PG_GETARG_OID(0);
     text *distributionColumnText = PG_GETARG_TEXT_P(1);
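Note: the catalog change this function performs is visible from SQL. A sketch,
assuming a hypothetical table 'events' with a 'user_id' column; after the
call, pg_dist_partition holds the chosen partition method and key:

    SELECT create_distributed_table('events', 'user_id', 'hash');
    SELECT partmethod, partkey FROM pg_dist_partition
        WHERE logicalrelid = 'events'::regclass;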
@@ -1,6 +1,6 @@
 /*-------------------------------------------------------------------------
  *
- * master_create_shards.c
+ * create_shards.c
  *
  * This file contains functions to distribute a table by creating shards for it
  * across a set of worker nodes.
@@ -54,11 +54,11 @@ static text * IntegerToText(int32 value);
 
 
 /* declarations for dynamic loading */
-PG_FUNCTION_INFO_V1(master_create_worker_shards);
+PG_FUNCTION_INFO_V1(create_worker_shards);
 
 
 /*
- * master_create_worker_shards creates empty shards for the given table based
+ * create_worker_shards creates empty shards for the given table based
  * on the specified number of initial shards. The function first gets a list of
  * candidate nodes and issues DDL commands on the nodes to create empty shard
  * placements on those nodes. The function then updates metadata on the master
@@ -67,7 +67,7 @@ PG_FUNCTION_INFO_V1(master_create_worker_shards);
  * ranges for each shard, giving them an equal split of the hash space.
  */
 Datum
-master_create_worker_shards(PG_FUNCTION_ARGS)
+create_worker_shards(PG_FUNCTION_ARGS)
 {
     text *tableNameText = PG_GETARG_TEXT_P(0);
     int32 shardCount = PG_GETARG_INT32(1);
@@ -163,7 +163,7 @@ master_create_worker_shards(PG_FUNCTION_ARGS)
         text *maxHashTokenText = NULL;
         int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement);
         int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1);
-        Datum shardIdDatum = master_get_new_shardid(NULL);
+        Datum shardIdDatum = get_new_shardid(NULL);
         int64 shardId = DatumGetInt64(shardIdDatum);
 
         /* if we are at the last shard, make sure the max token value is INT_MAX */
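Note: the token arithmetic above splits the 32-bit hash space evenly across
shards. A worked sketch for shard_count = 4 (so hashTokenIncrement is
2^32 / 4 = 1073741824), reproducing the per-shard ranges in SQL:

    SELECT shardindex,
           -2147483648 + shardindex::bigint * 1073741824           AS min_hash_token,
           -2147483648 + (shardindex + 1)::bigint * 1073741824 - 1 AS max_hash_token
    FROM generate_series(0, 3) AS shardindex;
    -- shard 0 covers [-2147483648, -1073741825]; shard 3 ends at 2147483647,
    -- matching the INT_MAX clamp applied to the last shard.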
@@ -57,12 +57,12 @@ static bool ExecuteRemoteCommand(const char *nodeName, uint32 nodePort,
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_apply_delete_command);
-PG_FUNCTION_INFO_V1(master_drop_all_shards);
+PG_FUNCTION_INFO_V1(apply_delete_command);
+PG_FUNCTION_INFO_V1(drop_all_shards);
 
 
 /*
- * master_apply_delete_command takes in a delete command, finds shards that
+ * apply_delete_command takes in a delete command, finds shards that
  * match the criteria defined in the delete command, drops the found shards from
  * the worker nodes, and updates the corresponding metadata on the master node.
  * This function drops a shard if and only if all rows in the shard satisfy
@@ -75,7 +75,7 @@ PG_FUNCTION_INFO_V1(master_drop_all_shards);
  * even though related shard placements are not deleted.
  */
 Datum
-master_apply_delete_command(PG_FUNCTION_ARGS)
+apply_delete_command(PG_FUNCTION_ARGS)
 {
     text *queryText = PG_GETARG_TEXT_P(0);
     char *queryString = text_to_cstring(queryText);
@@ -98,7 +98,7 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
     bool failOK = false;
     bool isTopLevel = true;
 
-    PreventTransactionChain(isTopLevel, "master_apply_delete_command");
+    PreventTransactionChain(isTopLevel, "apply_delete_command");
 
     queryTreeNode = ParseTreeNode(queryString);
     if (!IsA(queryTreeNode, DeleteStmt))
@@ -161,12 +161,12 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 
 
 /*
- * master_drop_shards attempts to drop all shards for a given relation.
- * Unlike master_apply_delete_command, this function can be called even
+ * drop_shards attempts to drop all shards for a given relation.
+ * Unlike apply_delete_command, this function can be called even
  * if the table has already been dropped.
  */
 Datum
-master_drop_all_shards(PG_FUNCTION_ARGS)
+drop_all_shards(PG_FUNCTION_ARGS)
 {
     Oid relationId = PG_GETARG_OID(0);
     text *schemaNameText = PG_GETARG_TEXT_P(1);
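Note: apply_delete_command still takes the DELETE statement itself as text,
and drops only those shards whose every row satisfies the predicate. A minimal
sketch against the lineitem table from the regression tests (the predicate is
illustrative and must cover whole shards):

    SELECT apply_delete_command('DELETE FROM lineitem WHERE l_orderkey > 5000');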
@@ -58,22 +58,22 @@ static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescripto
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_get_table_metadata);
-PG_FUNCTION_INFO_V1(master_get_table_ddl_events);
-PG_FUNCTION_INFO_V1(master_get_new_shardid);
-PG_FUNCTION_INFO_V1(master_get_local_first_candidate_nodes);
-PG_FUNCTION_INFO_V1(master_get_round_robin_candidate_nodes);
-PG_FUNCTION_INFO_V1(master_get_active_worker_nodes);
+PG_FUNCTION_INFO_V1(get_table_metadata);
+PG_FUNCTION_INFO_V1(get_table_ddl_events);
+PG_FUNCTION_INFO_V1(get_new_shardid);
+PG_FUNCTION_INFO_V1(get_local_first_candidate_nodes);
+PG_FUNCTION_INFO_V1(get_round_robin_candidate_nodes);
+PG_FUNCTION_INFO_V1(get_active_worker_nodes);
 
 
 /*
- * master_get_table_metadata takes in a relation name, and returns partition
+ * get_table_metadata takes in a relation name, and returns partition
  * related metadata for the relation. These metadata are grouped and returned in
  * a tuple, and are used by the caller when creating new shards. The function
  * errors if given relation does not exist, or is not partitioned.
  */
 Datum
-master_get_table_metadata(PG_FUNCTION_ARGS)
+get_table_metadata(PG_FUNCTION_ARGS)
 {
     text *relationName = PG_GETARG_TEXT_P(0);
     Oid relationId = ResolveRelationId(relationName);
@@ -174,13 +174,13 @@ CStoreTable(Oid relationId)
 
 
 /*
- * master_get_table_ddl_events takes in a relation name, and returns the set of
+ * get_table_ddl_events takes in a relation name, and returns the set of
  * DDL commands needed to reconstruct the relation. The returned DDL commands
  * are similar in flavor to schema definitions that pgdump returns. The function
  * errors if given relation does not exist.
  */
 Datum
-master_get_table_ddl_events(PG_FUNCTION_ARGS)
+get_table_ddl_events(PG_FUNCTION_ARGS)
 {
     FuncCallContext *functionContext = NULL;
     ListCell *tableDDLEventCell = NULL;
@@ -240,7 +240,7 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_new_shardid allocates and returns a unique shardId for the shard
+ * get_new_shardid allocates and returns a unique shardId for the shard
  * to be created. This allocation occurs both in shared memory and in write
 * ahead logs; writing to logs avoids the risk of having shardId collisions.
 *
@@ -249,7 +249,7 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
 * on an internal sequence created in initdb to generate unique identifiers.
 */
 Datum
-master_get_new_shardid(PG_FUNCTION_ARGS)
+get_new_shardid(PG_FUNCTION_ARGS)
 {
     text *sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME);
     Oid sequenceId = ResolveRelationId(sequenceName);
@@ -264,7 +264,7 @@ master_get_new_shardid(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_local_first_candidate_nodes returns a set of candidate host names
+ * get_local_first_candidate_nodes returns a set of candidate host names
 * and port numbers on which to place new shards. The function makes sure to
 * always allocate the first candidate node as the node the caller is connecting
 * from; and allocates additional nodes until the shard replication factor is
@@ -273,7 +273,7 @@ master_get_new_shardid(PG_FUNCTION_ARGS)
 * replication factor.
 */
 Datum
-master_get_local_first_candidate_nodes(PG_FUNCTION_ARGS)
+get_local_first_candidate_nodes(PG_FUNCTION_ARGS)
 {
     FuncCallContext *functionContext = NULL;
     uint32 desiredNodeCount = 0;
@@ -380,14 +380,14 @@ master_get_local_first_candidate_nodes(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_round_robin_candidate_nodes returns a set of candidate host names
+ * get_round_robin_candidate_nodes returns a set of candidate host names
 * and port numbers on which to place new shards. The function uses the round
 * robin policy to choose the nodes and tries to ensure that there is an even
 * distribution of shards across the worker nodes. This function errors out if
 * the number of available nodes falls short of the replication factor.
 */
 Datum
-master_get_round_robin_candidate_nodes(PG_FUNCTION_ARGS)
+get_round_robin_candidate_nodes(PG_FUNCTION_ARGS)
 {
     uint64 shardId = PG_GETARG_INT64(0);
     FuncCallContext *functionContext = NULL;
@@ -464,12 +464,12 @@ master_get_round_robin_candidate_nodes(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_active_worker_nodes returns a set of active worker host names and
+ * get_active_worker_nodes returns a set of active worker host names and
 * port numbers in deterministic order. Currently we assume that all worker
 * nodes in pg_worker_list.conf are active.
 */
 Datum
-master_get_active_worker_nodes(PG_FUNCTION_ARGS)
+get_active_worker_nodes(PG_FUNCTION_ARGS)
 {
     FuncCallContext *functionContext = NULL;
     uint32 workerNodeIndex = 0;
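Note: since get_new_shardid() draws from an internal sequence (per the comment
above), consecutive calls return strictly increasing identifiers. The starting
value below matches the regression tests; actual values vary per cluster:

    SELECT get_new_shardid();  -- e.g. 102008
    SELECT get_new_shardid();  -- e.g. 102009, the next sequence value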
@@ -45,11 +45,11 @@ static bool CopyDataFromFinalizedPlacement(Oid distributedTableId, int64 shardId
 
 
 /* declarations for dynamic loading */
-PG_FUNCTION_INFO_V1(master_copy_shard_placement);
+PG_FUNCTION_INFO_V1(copy_shard_placement);
 
 
 /*
- * master_copy_shard_placement implements a user-facing UDF to copy data from
+ * copy_shard_placement implements a user-facing UDF to copy data from
  * a healthy (source) node to an inactive (target) node. To accomplish this it
  * entirely recreates the table structure before copying all data. During this
  * time all modifications are paused to the shard. After successful repair, the
@@ -58,7 +58,7 @@ PG_FUNCTION_INFO_V1(master_copy_shard_placement);
  * in an unhealthy state.
  */
 Datum
-master_copy_shard_placement(PG_FUNCTION_ARGS)
+copy_shard_placement(PG_FUNCTION_ARGS)
 {
     int64 shardId = PG_GETARG_INT64(0);
     text *sourceNodeName = PG_GETARG_TEXT_P(1);
@@ -52,19 +52,19 @@ static StringInfo WorkerPartitionValue(char *nodeName, uint32 nodePort, Oid rela
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_create_empty_shard);
-PG_FUNCTION_INFO_V1(master_append_table_to_shard);
+PG_FUNCTION_INFO_V1(create_empty_shard);
+PG_FUNCTION_INFO_V1(append_table_to_shard);
 
 
 /*
- * master_create_empty_shard creates an empty shard for the given distributed
+ * create_empty_shard creates an empty shard for the given distributed
  * table. For this, the function first gets a list of candidate nodes, connects
  * to these nodes, and issues DDL commands on the nodes to create empty shard
  * placements. The function then updates metadata on the master node to make
  * this shard (and its placements) visible.
  */
 Datum
-master_create_empty_shard(PG_FUNCTION_ARGS)
+create_empty_shard(PG_FUNCTION_ARGS)
 {
     text *relationNameText = PG_GETARG_TEXT_P(0);
     char *relationName = text_to_cstring(relationNameText);
@@ -99,7 +99,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
     }
 
     /* generate new and unique shardId from sequence */
-    shardIdDatum = master_get_new_shardid(NULL);
+    shardIdDatum = get_new_shardid(NULL);
     shardId = DatumGetInt64(shardIdDatum);
 
     /* get table DDL commands to replay on the worker node */
@@ -137,7 +137,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 
 
 /*
- * master_append_table_to_shard appends the given table's contents to the given
+ * append_table_to_shard appends the given table's contents to the given
  * shard, and updates shard metadata on the master node. If the function fails
  * to append table data to all shard placements, it doesn't update any metadata
 * and errors out. Else if the function fails to append table data to some of
@@ -145,7 +145,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 * placements will get cleaned up during shard rebalancing.
 */
 Datum
-master_append_table_to_shard(PG_FUNCTION_ARGS)
+append_table_to_shard(PG_FUNCTION_ARGS)
 {
     uint64 shardId = PG_GETARG_INT64(0);
     text *sourceTableNameText = PG_GETARG_TEXT_P(1);
@@ -208,7 +208,7 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
     {
         ereport(ERROR, (errmsg("could not find any shard placements for shardId "
                                UINT64_FORMAT, shardId),
-                        errhint("Try running master_create_empty_shard() first")));
+                        errhint("Try running create_empty_shard() first")));
     }
 
     /* issue command to append table to each shard placement */
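Note: the errhint change reflects the staging flow: create the empty shard
first, then append into it. A sketch, assuming an append-distributed
'lineitem' and a hypothetical local staging table; the argument order is taken
from the SQL signature (shard id, source table, source node name, source node
port), which is an assumption here:

    SELECT create_empty_shard('lineitem');  -- returns a shardId, e.g. 102080
    SELECT append_table_to_shard(102080, 'lineitem_staging', 'localhost', 57637);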
@@ -524,7 +524,7 @@ DistributedModifyShardInterval(Query *query)
                         errmsg("could not find any shards for modification"),
                         errdetail("No shards exist for distributed table \"%s\".",
                                   relationName),
-                        errhint("Run master_create_worker_shards to create shards "
+                        errhint("Run create_worker_shards to create shards "
                                 "and try again.")));
     }
 
@@ -182,7 +182,7 @@ RegisterCitusConfigVariables(void)
     NormalizeWorkerListPath();
 
     DefineCustomBoolVariable(
-        "citus.binary_master_copy_format",
+        "citus.binary_copy_format",
         gettext_noop("Use the binary master copy format."),
         gettext_noop("When enabled, data is copied from workers to the master "
                      "in PostgreSQL's binary serialization format."),
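Note: only the GUC's identifier changes; it keeps boolean semantics, as the
regression test further down confirms:

    SET citus.binary_copy_format TO 'on';
    SHOW citus.binary_copy_format;  -- on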
@@ -276,7 +276,7 @@ create_monolithic_shard_row(PG_FUNCTION_ARGS)
     Oid distributedTableId = PG_GETARG_OID(0);
     StringInfo minInfo = makeStringInfo();
     StringInfo maxInfo = makeStringInfo();
-    Datum newShardIdDatum = master_get_new_shardid(NULL);
+    Datum newShardIdDatum = get_new_shardid(NULL);
     int64 newShardId = DatumGetInt64(newShardIdDatum);
     text *maxInfoText = NULL;
     text *minInfoText = NULL;
@@ -59,8 +59,8 @@ static void CachedRelationLookup(const char *relationName, Oid *cachedOid);
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_dist_partition_cache_invalidate);
-PG_FUNCTION_INFO_V1(master_dist_shard_cache_invalidate);
+PG_FUNCTION_INFO_V1(dist_partition_cache_invalidate);
+PG_FUNCTION_INFO_V1(dist_shard_cache_invalidate);
 
 
 /*
@@ -439,12 +439,12 @@ CitusExtraDataContainerFuncId(void)
 
 
 /*
- * master_dist_partition_cache_invalidate is a trigger function that performs
+ * dist_partition_cache_invalidate is a trigger function that performs
  * relcache invalidations when the contents of pg_dist_partition are changed
  * on the SQL level.
  */
 Datum
-master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
+dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
 {
     TriggerData *triggerData = (TriggerData *) fcinfo->context;
     HeapTuple newTuple = NULL;
@@ -497,12 +497,12 @@ master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
 
 
 /*
- * master_dist_shard_cache_invalidate is a trigger function that performs
+ * dist_shard_cache_invalidate is a trigger function that performs
  * relcache invalidations when the contents of pg_dist_shard are changed
  * on the SQL level.
  */
 Datum
-master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
+dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
 {
     TriggerData *triggerData = (TriggerData *) fcinfo->context;
     HeapTuple newTuple = NULL;
@@ -30,13 +30,13 @@
 #define ROLLBACK_COMMAND "ROLLBACK"
 
 /* Names of remote function calls to execute on the master. */
-#define MASTER_GET_TABLE_METADATA "SELECT * FROM master_get_table_metadata($1::text)"
-#define MASTER_GET_TABLE_DDL_EVENTS "SELECT * FROM master_get_table_ddl_events($1::text)"
-#define MASTER_GET_NEW_SHARDID "SELECT * FROM master_get_new_shardid()"
+#define MASTER_GET_TABLE_METADATA "SELECT * FROM get_table_metadata($1::text)"
+#define MASTER_GET_TABLE_DDL_EVENTS "SELECT * FROM get_table_ddl_events($1::text)"
+#define MASTER_GET_NEW_SHARDID "SELECT * FROM get_new_shardid()"
 #define MASTER_GET_LOCAL_FIRST_CANDIDATE_NODES \
-    "SELECT * FROM master_get_local_first_candidate_nodes()"
+    "SELECT * FROM get_local_first_candidate_nodes()"
 #define MASTER_GET_ROUND_ROBIN_CANDIDATE_NODES \
-    "SELECT * FROM master_get_round_robin_candidate_nodes($1::int8)"
+    "SELECT * FROM get_round_robin_candidate_nodes($1::int8)"
 
 #define MASTER_INSERT_SHARD_ROW \
     "INSERT INTO pg_dist_shard " \
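Note: these macros are the remote-call texts executed on the master (per the
comment above); they are sent as parameterized statements. With 'lineitem'
bound to $1, MASTER_GET_TABLE_METADATA is equivalent to:

    SELECT * FROM get_table_metadata('lineitem'::text);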
@@ -87,23 +87,23 @@ extern void CreateShardPlacements(int64 shardId, List *ddlEventList,
                                   int replicationFactor);
 
 /* Function declarations for generating metadata for shard creation */
-extern Datum master_get_table_metadata(PG_FUNCTION_ARGS);
-extern Datum master_get_table_ddl_events(PG_FUNCTION_ARGS);
-extern Datum master_get_new_shardid(PG_FUNCTION_ARGS);
-extern Datum master_get_local_first_candidate_nodes(PG_FUNCTION_ARGS);
-extern Datum master_get_round_robin_candidate_nodes(PG_FUNCTION_ARGS);
-extern Datum master_get_active_worker_nodes(PG_FUNCTION_ARGS);
+extern Datum get_table_metadata(PG_FUNCTION_ARGS);
+extern Datum get_table_ddl_events(PG_FUNCTION_ARGS);
+extern Datum get_new_shardid(PG_FUNCTION_ARGS);
+extern Datum get_local_first_candidate_nodes(PG_FUNCTION_ARGS);
+extern Datum get_round_robin_candidate_nodes(PG_FUNCTION_ARGS);
+extern Datum get_active_worker_nodes(PG_FUNCTION_ARGS);
 
 /* Function declarations to help with data staging and deletion */
-extern Datum master_create_empty_shard(PG_FUNCTION_ARGS);
-extern Datum master_append_table_to_shard(PG_FUNCTION_ARGS);
-extern Datum master_apply_delete_command(PG_FUNCTION_ARGS);
-extern Datum master_drop_all_shards(PG_FUNCTION_ARGS);
+extern Datum create_empty_shard(PG_FUNCTION_ARGS);
+extern Datum append_table_to_shard(PG_FUNCTION_ARGS);
+extern Datum apply_delete_command(PG_FUNCTION_ARGS);
+extern Datum drop_all_shards(PG_FUNCTION_ARGS);
 
 /* function declarations for shard creation functionality */
-extern Datum master_create_worker_shards(PG_FUNCTION_ARGS);
+extern Datum create_worker_shards(PG_FUNCTION_ARGS);
 
 /* function declarations for shard repair functionality */
-extern Datum master_copy_shard_placement(PG_FUNCTION_ARGS);
+extern Datum copy_shard_placement(PG_FUNCTION_ARGS);
 
 #endif   /* MASTER_PROTOCOL_H */
@@ -48,7 +48,7 @@
 
 /* Defines that relate to fetching foreign tables */
 #define FOREIGN_CACHED_FILE_PATH "pg_foreign_file/cached/%s"
-#define GET_TABLE_DDL_EVENTS "SELECT master_get_table_ddl_events('%s')"
+#define GET_TABLE_DDL_EVENTS "SELECT get_table_ddl_events('%s')"
 #define SET_FOREIGN_TABLE_FILENAME "ALTER FOREIGN TABLE %s OPTIONS (SET filename '%s')"
 #define FOREIGN_FILE_PATH_COMMAND "SELECT worker_foreign_file_path('%s')"
 #define SET_SEARCH_PATH_COMMAND "SET search_path TO %s"
@@ -2,7 +2,7 @@
 -- MULTI_BINARY_MASTER_COPY
 --
 -- Try binary master copy for different executors
-SET citus.binary_master_copy_format TO 'on';
+SET citus.binary_copy_format TO 'on';
 SET citus.task_executor_type TO 'task-tracker';
 SELECT count(*) FROM lineitem;
  count
@@ -46,15 +46,15 @@ CREATE TABLE insert_target (
 );
 -- squelch WARNINGs that contain worker_port
 SET client_min_messages TO ERROR;
-SELECT master_create_distributed_table('insert_target', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('insert_target', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('insert_target', 2, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('insert_target', 2, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -39,29 +39,29 @@ CREATE TABLE table_to_distribute (
     test_type_data dummy_type
 );
 -- use an index instead of table name
-SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
+SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
 ERROR:  cannot distribute relation: table_to_distribute_pkey
 DETAIL:  Distributed relations must be regular or foreign tables.
 -- use a bad column name
-SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash');
 ERROR:  column "bad_column" of relation "table_to_distribute" does not exist
 -- use unrecognized partition type
-SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized');
+SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized');
 ERROR:  invalid input value for enum citus.distribution_type: "unrecognized"
 LINE 1: ..._distributed_table('table_to_distribute', 'name', 'unrecogni...
                                                               ^
 -- use a partition column of a type lacking any default operator class
-SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash');
 ERROR:  data type json has no default operator class for specified partition method
 DETAIL:  Partition column types must have a default operator class defined.
 -- use a partition column of type lacking the required support function (hash)
-SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
 ERROR:  could not identify a hash function for type dummy_type
 DETAIL:  Partition column types must have a hash function defined to use hash partitioning.
 -- distribute table and inspect side effects
-SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('table_to_distribute', 'name', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -73,19 +73,19 @@ SELECT partmethod, partkey FROM pg_dist_partition
 (1 row)
 
 -- use a bad shard count
-SELECT master_create_worker_shards('table_to_distribute', 0, 1);
+SELECT create_worker_shards('table_to_distribute', 0, 1);
 ERROR:  shard_count must be positive
 -- use a bad replication factor
-SELECT master_create_worker_shards('table_to_distribute', 16, 0);
+SELECT create_worker_shards('table_to_distribute', 16, 0);
 ERROR:  replication_factor must be positive
 -- use a replication factor higher than shard count
-SELECT master_create_worker_shards('table_to_distribute', 16, 3);
+SELECT create_worker_shards('table_to_distribute', 16, 3);
 ERROR:  replication_factor (3) exceeds number of worker nodes (2)
 HINT:  Add more worker nodes or try again with a lower replication factor.
 -- finally, create shards and inspect metadata
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('table_to_distribute', 16, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -130,7 +130,7 @@ SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relk
 (1 row)
 
 -- try to create them again
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
+SELECT create_worker_shards('table_to_distribute', 16, 1);
 ERROR:  table "table_to_distribute" has already had shards created for it
 -- test list sorting
 SELECT sort_names('sumedh', 'jason', 'ozgun');
@@ -155,16 +155,16 @@ CREATE FOREIGN TABLE foreign_table_to_distribute
     id bigint
 )
 SERVER fake_fdw_server;
-SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1);
+SELECT create_worker_shards('foreign_table_to_distribute', 16, 1);
 NOTICE:  foreign-data wrapper "fake_fdw" does not have an extension defined
- master_create_worker_shards
------------------------------
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -197,15 +197,15 @@ CREATE TABLE weird_shard_count
     name text,
     id bigint
 );
-SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('weird_shard_count', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('weird_shard_count', 7, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('weird_shard_count', 7, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -21,12 +21,12 @@ CREATE TABLE lineitem (
     l_shipmode char(10) not null,
     l_comment varchar(44) not null,
     PRIMARY KEY(l_orderkey, l_linenumber) );
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
 WARNING:  table "lineitem" has a unique constraint
 DETAIL:  Unique constraints and primary keys on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- master_create_distributed_table
----------------------------------
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -42,12 +42,12 @@ CREATE TABLE orders (
     o_shippriority integer not null,
     o_comment varchar(79) not null,
     PRIMARY KEY(o_orderkey) );
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');
 WARNING:  table "orders" has a unique constraint
 DETAIL:  Unique constraints and primary keys on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- master_create_distributed_table
----------------------------------
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -60,9 +60,9 @@ CREATE TABLE customer (
     c_acctbal decimal(15,2) not null,
     c_mktsegment char(10) not null,
     c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('customer', 'c_custkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -71,9 +71,9 @@ CREATE TABLE nation (
     n_name char(25) not null,
     n_regionkey integer not null,
     n_comment varchar(152));
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -87,9 +87,9 @@ CREATE TABLE part (
     p_container char(10) not null,
     p_retailprice decimal(15,2) not null,
     p_comment varchar(23) not null);
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('part', 'p_partkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -103,9 +103,9 @@ CREATE TABLE supplier
     s_acctbal decimal(15,2) not null,
     s_comment varchar(101) not null
 );
-SELECT master_create_distributed_table('supplier', 's_suppkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('supplier', 's_suppkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -116,7 +116,7 @@ CREATE TABLE primary_key_on_non_part_col
     partition_col integer,
     other_col integer PRIMARY KEY
 );
-SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
 ERROR:  cannot distribute relation: "primary_key_on_non_part_col"
 DETAIL:  Distributed relations cannot have UNIQUE constraints or PRIMARY KEYs that do not include the partition column.
 CREATE TABLE unique_const_on_non_part_col
@@ -124,7 +124,7 @@ CREATE TABLE unique_const_on_non_part_col
     partition_col integer,
     other_col integer UNIQUE
 );
-SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
 ERROR:  cannot distribute relation: "primary_key_on_non_part_col"
 DETAIL:  Distributed relations cannot have UNIQUE constraints or PRIMARY KEYs that do not include the partition column.
 -- now show that Citus can distribute unique constrints that include
@@ -134,9 +134,9 @@ CREATE TABLE primary_key_on_part_col
     partition_col integer PRIMARY KEY,
     other_col integer
 );
-SELECT master_create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -145,9 +145,9 @@ CREATE TABLE unique_const_on_part_col
     partition_col integer UNIQUE,
     other_col integer
 );
-SELECT master_create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -157,9 +157,9 @@ CREATE TABLE unique_const_on_two_columns
     other_col integer,
     UNIQUE (partition_col, other_col)
 );
-SELECT master_create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -168,12 +168,12 @@ CREATE TABLE unique_const_append_partitioned_tables
     partition_col integer UNIQUE,
     other_col integer
 );
-SELECT master_create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');
+SELECT create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');
 WARNING:  table "unique_const_append_partitioned_tables" has a unique constraint
 DETAIL:  Unique constraints and primary keys on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- master_create_distributed_table
----------------------------------
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -182,9 +182,9 @@ CREATE TABLE unique_const_range_partitioned_tables
     partition_col integer UNIQUE,
     other_col integer
 );
-SELECT master_create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
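Note: the pattern behind the errors above is that a unique constraint is
enforceable only when it includes the partition column, since each shard can
check uniqueness only within its own rows. A sketch of the accepted shape (the
table name is illustrative):

    CREATE TABLE ok_constraint (
        partition_col integer,
        other_col integer,
        UNIQUE (partition_col, other_col)
    );
    SELECT create_distributed_table('ok_constraint', 'partition_col', 'hash');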
@@ -44,15 +44,15 @@ CREATE TABLE composite_type_partitioned_table
     id integer,
     col test_composite_type
 );
-SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('composite_type_partitioned_table', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -81,15 +81,15 @@ CREATE TABLE bugs (
     id integer,
     status bug_status
 );
-SELECT master_create_distributed_table('bugs', 'status', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('bugs', 'status', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('bugs', 4, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('bugs', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -121,15 +121,15 @@ CREATE TABLE varchar_hash_partitioned_table
     id int,
     name varchar
 );
-SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('varchar_hash_partitioned_table', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -29,9 +29,9 @@ CREATE FOREIGN TABLE lineitem (
     l_comment varchar(44) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -47,9 +47,9 @@ CREATE FOREIGN TABLE orders (
     o_comment varchar(79) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -64,9 +64,9 @@ CREATE FOREIGN TABLE customer (
     c_comment varchar(117) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('customer', 'c_custkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -77,9 +77,9 @@ CREATE FOREIGN TABLE nation (
     n_comment varchar(152))
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -95,9 +95,9 @@ CREATE FOREIGN TABLE part (
     p_comment varchar(23) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('part', 'p_partkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -3,27 +3,27 @@
 --
 -- Tests that check the metadata returned by the master node.
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-       part_placement_policy FROM master_get_table_metadata('lineitem');
+       part_placement_policy FROM get_table_metadata('lineitem');
  part_storage_type |  part_key  | part_replica_count | part_max_size | part_placement_policy
 -------------------+------------+--------------------+---------------+-----------------------
  f                 | l_orderkey |                  2 |        307200 |                     2
 (1 row)
 
-SELECT * FROM master_get_table_ddl_events('lineitem');
-                master_get_table_ddl_events
+SELECT * FROM get_table_ddl_events('lineitem');
+                get_table_ddl_events
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 CREATE EXTENSION IF NOT EXISTS file_fdw WITH SCHEMA public
 CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw
 CREATE FOREIGN TABLE lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL) SERVER file_server OPTIONS (format 'text', filename '', delimiter '|', "null" '')
 (3 rows)
 
-SELECT * FROM master_get_new_shardid();
- master_get_new_shardid
-------------------------
-                 102008
+SELECT * FROM get_new_shardid();
+ get_new_shardid
+-----------------
+          102008
 (1 row)
 
-SELECT node_name FROM master_get_local_first_candidate_nodes();
+SELECT node_name FROM get_local_first_candidate_nodes();
  node_name
 -----------
  localhost
@@ -16,9 +16,9 @@ CREATE TABLE orders_hash_partitioned (
     o_clerk char(15),
     o_shippriority integer,
     o_comment varchar(79) );
-SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -8,54 +8,54 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102080;
 CREATE TABLE index_test_range(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_range', 'a', 'range');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('index_test_range', 'a', 'range');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_empty_shard('index_test_range');
- master_create_empty_shard
----------------------------
-                    102080
+SELECT create_empty_shard('index_test_range');
+ create_empty_shard
+--------------------
+             102080
 (1 row)
 
-SELECT master_create_empty_shard('index_test_range');
- master_create_empty_shard
----------------------------
-                    102081
+SELECT create_empty_shard('index_test_range');
+ create_empty_shard
+--------------------
+             102081
 (1 row)
 
 CREATE TABLE index_test_hash(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('index_test_hash', 'a', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('index_test_hash', 8, 2);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('index_test_hash', 8, 2);
+ create_worker_shards
+----------------------
 
 (1 row)
 
 CREATE TABLE index_test_append(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_append', 'a', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('index_test_append', 'a', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_empty_shard('index_test_append');
- master_create_empty_shard
----------------------------
-                    102090
+SELECT create_empty_shard('index_test_append');
+ create_empty_shard
+--------------------
+             102090
 (1 row)
 
-SELECT master_create_empty_shard('index_test_append');
- master_create_empty_shard
----------------------------
-                    102091
+SELECT create_empty_shard('index_test_append');
+ create_empty_shard
+--------------------
+             102091
 (1 row)
 
 --
@@ -114,7 +114,7 @@ SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
      0
 (1 row)
 
-\c - - - :master_port
+\c - - - :port
 -- Verify that we error out on unsupported statement types
 CREATE INDEX CONCURRENTLY try_index ON lineitem (l_orderkey);
 ERROR:  creating indexes concurrently on distributed tables is currently unsupported
@@ -211,7 +211,7 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
 ------------+-----------+-----------+------------+----------
 (0 rows)
 
-\c - - - :master_port
+\c - - - :port
 -- Drop created tables
 DROP TABLE index_test_range;
 DROP TABLE index_test_hash;
@@ -3,48 +3,48 @@
 --
 -- Tests that check the metadata returned by the master node.
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-       part_placement_policy FROM master_get_table_metadata('lineitem');
+       part_placement_policy FROM get_table_metadata('lineitem');
  part_storage_type |  part_key  | part_replica_count | part_max_size | part_placement_policy
 -------------------+------------+--------------------+---------------+-----------------------
  t                 | l_orderkey |                  2 |        307200 |                     2
 (1 row)
 
-SELECT * FROM master_get_table_ddl_events('lineitem');
-                master_get_table_ddl_events
+SELECT * FROM get_table_ddl_events('lineitem');
+                get_table_ddl_events
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 CREATE TABLE lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL)
 CREATE INDEX lineitem_time_index ON lineitem USING btree (l_shipdate)
 ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber)
 (3 rows)
 
-SELECT * FROM master_get_new_shardid();
- master_get_new_shardid
-------------------------
-                 102008
+SELECT * FROM get_new_shardid();
+ get_new_shardid
+-----------------
+          102008
 (1 row)
 
-SELECT * FROM master_get_local_first_candidate_nodes();
+SELECT * FROM get_local_first_candidate_nodes();
  node_name | node_port
 -----------+-----------
  localhost |     57638
 localhost |     57637
 (2 rows)
 
-SELECT * FROM master_get_round_robin_candidate_nodes(1);
+SELECT * FROM get_round_robin_candidate_nodes(1);
  node_name | node_port
 -----------+-----------
  localhost |     57638
 localhost |     57637
 (2 rows)
 
-SELECT * FROM master_get_round_robin_candidate_nodes(2);
+SELECT * FROM get_round_robin_candidate_nodes(2);
  node_name | node_port
 -----------+-----------
  localhost |     57637
 localhost |     57638
 (2 rows)
 
-SELECT * FROM master_get_active_worker_nodes();
+SELECT * FROM get_active_worker_nodes();
  node_name | node_port
 -----------+-----------
  localhost |     57638
|
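The two metadata UDFs renamed above combine naturally when reproducing a distributed table elsewhere; a minimal sketch, assuming the post-rename function names from this commit:

    -- sketch: replication settings plus the DDL needed to recreate the table
    SELECT part_replica_count, part_max_size
    FROM get_table_metadata('lineitem');
    SELECT * FROM get_table_ddl_events('lineitem');  -- one row per DDL statement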
@@ -13,60 +13,60 @@ CREATE TABLE limit_orders (
 CREATE TABLE insufficient_shards ( LIKE limit_orders );
 CREATE TABLE range_partitioned ( LIKE limit_orders );
 CREATE TABLE append_partitioned ( LIKE limit_orders );
-SELECT master_create_distributed_table('limit_orders', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('limit_orders', 'id', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('insufficient_shards', 'id', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_distributed_table('range_partitioned', 'id', 'range');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('range_partitioned', 'id', 'range');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_distributed_table('append_partitioned', 'id', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('append_partitioned', 'id', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_worker_shards('limit_orders', 2, 2);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('limit_orders', 2, 2);
+ create_worker_shards
+----------------------

 (1 row)

 -- make a single shard that covers no partition values
-SELECT master_create_worker_shards('insufficient_shards', 1, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('insufficient_shards', 1, 1);
+ create_worker_shards
+----------------------

 (1 row)

 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0
 WHERE logicalrelid = 'insufficient_shards'::regclass;
 -- create range-partitioned shards
-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 49999
 WHERE shardid = :new_shard_id;
-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 50000, shardmaxvalue = 99999
 WHERE shardid = :new_shard_id;
 -- create append-partitioned shards
-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000
 WHERE shardid = :new_shard_id;
-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000
 WHERE shardid = :new_shard_id;
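The \gset calls above are what make the shard-range setup work: psql stores each output column of the preceding query into a variable of the same name, so the new shard id becomes :new_shard_id. A minimal sketch of the pattern, using the renamed create_empty_shard:

    -- sketch: capture the fresh shard id, then assign its range by hand
    SELECT create_empty_shard('range_partitioned') AS new_shard_id
    \gset
    UPDATE pg_dist_shard
    SET shardminvalue = 0, shardmaxvalue = 49999
    WHERE shardid = :new_shard_id;  -- :new_shard_id was populated by \gset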
@@ -65,9 +65,9 @@ CREATE TABLE varchar_partitioned_table
 (
     varchar_column varchar(100)
 );
-SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -93,9 +93,9 @@ CREATE TABLE array_partitioned_table
 (
     array_column text[]
 );
-SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -129,9 +129,9 @@ CREATE TABLE composite_partitioned_table
 (
     composite_column composite_type
 );
-SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
+ create_distributed_table
+--------------------------

 (1 row)
@@ -8,16 +8,16 @@ CREATE INDEX ON customer_engagements (id);
 CREATE INDEX ON customer_engagements (created_at);
 CREATE INDEX ON customer_engagements (event_data);
 -- distribute the table
-SELECT master_create_distributed_table('customer_engagements', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('customer_engagements', 'id', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

 -- create a single shard on the first worker
-SELECT master_create_worker_shards('customer_engagements', 1, 2);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('customer_engagements', 1, 2);
+ create_worker_shards
+----------------------

 (1 row)

@@ -29,7 +29,7 @@ INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event');
 -- (i) create a new shard
 -- (ii) mark the second shard placements as unhealthy
 -- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones
--- (iv) do a successful master_copy_shard_placement from the first placement to the second
+-- (iv) do a successful copy_shard_placement from the first placement to the second
 -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement
 -- get the newshardid
 SELECT shardid as newshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_engagements'::regclass

@@ -39,15 +39,15 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :newshardid AN
 -- add a fake healthy placement for the tests
 INSERT INTO pg_dist_shard_placement (nodename, nodeport, shardid, shardstate, shardlength)
 VALUES ('dummyhost', :worker_2_port, :newshardid, 1, 0);
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port);
 ERROR: target placement must be in inactive state
 -- also try to copy from an inactive placement
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
 ERROR: source placement must be in finalized state
 -- "copy" this shard from the first placement to the second one
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
- master_copy_shard_placement
------------------------------
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+ copy_shard_placement
+----------------------

 (1 row)

@@ -69,17 +69,17 @@ CREATE FOREIGN TABLE remote_engagements (
     event_data text
 ) SERVER fake_fdw_server;
 -- distribute the table
-SELECT master_create_distributed_table('remote_engagements', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('remote_engagements', 'id', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

 -- create a single shard on the first worker
-SELECT master_create_worker_shards('remote_engagements', 1, 2);
+SELECT create_worker_shards('remote_engagements', 1, 2);
 NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
- master_create_worker_shards
------------------------------
+ create_worker_shards
+----------------------

 (1 row)

@@ -89,6 +89,6 @@ SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remo
 -- now, update the second placement as unhealthy
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND nodeport = :worker_2_port;
 -- oops! we don't support repairing shards backed by foreign tables
-SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
 ERROR: cannot repair shard
 DETAIL: Repairing shards backed by foreign tables is not supported.
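The repair flow tested above always has the same shape: the broken placement is moved to shardstate 3 (inactive), and copy_shard_placement then copies from a finalized placement (state 1) onto the inactive one. A minimal sketch, assuming this commit's renamed UDF and the psql variables defined in the test:

    -- sketch: mark the stale placement inactive, then repair it from the healthy copy
    UPDATE pg_dist_shard_placement
    SET shardstate = 3                -- 3 = inactive, a valid repair target
    WHERE shardid = :newshardid AND nodeport = :worker_2_port;
    SELECT copy_shard_placement(:newshardid,
                                'localhost', :worker_1_port,   -- finalized source
                                'localhost', :worker_2_port);  -- inactive target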
@@ -2,7 +2,7 @@
 -- MULTI_REPARTITIONED_SUBQUERY_UDF
 --
 -- Create UDF in master and workers
-\c - - - :master_port
+\c - - - :port
 DROP FUNCTION IF EXISTS median(double precision[]);
 NOTICE: function median(pg_catalog.float8[]) does not exist, skipping
 CREATE FUNCTION median(double precision[]) RETURNS double precision

@@ -33,7 +33,7 @@ LANGUAGE sql IMMUTABLE AS $_$
     OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
 $_$;
 -- Run query on master
-\c - - - :master_port
+\c - - - :port
 SET citus.task_executor_type TO 'task-tracker';
 SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*)
 FROM lineitem GROUP BY l_partkey) AS a
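Only the first and last lines of the median UDF survive in these hunks; a sketch of a complete definition consistent with those fragments. The middle three lines follow the common SQL median idiom and are an assumption, not the committed text:

    CREATE FUNCTION median(double precision[]) RETURNS double precision
    LANGUAGE sql IMMUTABLE AS $_$
        SELECT AVG(val) FROM (
            SELECT val FROM unnest($1) val
            ORDER BY 1
            LIMIT 2 - MOD(array_upper($1, 1), 2)      -- one value for odd counts, two for even
            OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub;
    $_$;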
@@ -11,15 +11,15 @@ CREATE TABLE articles (
 CREATE TABLE authors ( name text, id bigint );
 -- this table is used in router executor tests
 CREATE TABLE articles_single_shard (LIKE articles);
-SELECT master_create_distributed_table('articles', 'author_id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('articles', 'author_id', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('articles_single_shard', 'author_id', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -30,15 +30,15 @@ SELECT count(*) from articles;

 (1 row)

-SELECT master_create_worker_shards('articles', 2, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('articles', 2, 1);
+ create_worker_shards
+----------------------

 (1 row)

-SELECT master_create_worker_shards('articles_single_shard', 1, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('articles_single_shard', 1, 1);
+ create_worker_shards
+----------------------

 (1 row)
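articles_single_shard exists so the router executor tests have a table whose rows all live in one shard; queries that pin the distribution column to a single value can then be routed whole to one worker. A minimal sketch of such a query, assuming the tables created above:

    -- sketch: an equality filter on author_id routes the entire query to one shard
    SELECT * FROM articles WHERE author_id = 1;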
@@ -3,9 +3,9 @@
 --
 -- Tests around changing the schema and dropping of a distributed table
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -26,20 +26,20 @@ ERROR: cannot execute ALTER TABLE command involving partition column
 BEGIN;
 DROP TABLE testtableddl;
 ERROR: DROP distributed table cannot run inside a transaction block
-CONTEXT: SQL statement "SELECT master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
+CONTEXT: SQL statement "SELECT drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
 PL/pgSQL function citus_drop_trigger() line 15 at PERFORM
 ROLLBACK;
 -- verify that the table can be dropped
 DROP TABLE testtableddl;
 -- verify that the table can be dropped even if shards exist
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT 1 FROM master_create_empty_shard('testtableddl');
+SELECT 1 FROM create_empty_shard('testtableddl');
 ?column?
 ----------
        1
@@ -6,9 +6,9 @@
 -- and shard placement data into system catalogs. We next run Explain command,
 -- and check that tasks are assigned to worker nodes as expected.
 CREATE TABLE task_assignment_test_table (test_id integer);
-SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');
+ create_distributed_table
+--------------------------

 (1 row)
@@ -8,15 +8,15 @@ CREATE TABLE upsert_test
     third_col int
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test', 'part_key', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test', '4', '2');
+ create_worker_shards
+----------------------

 (1 row)

@@ -112,15 +112,15 @@ CREATE TABLE upsert_test_2
     PRIMARY KEY (part_key, other_col)
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test_2', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test_2', '4', '2');
+ create_worker_shards
+----------------------

 (1 row)

@@ -139,15 +139,15 @@ CREATE TABLE upsert_test_3
 -- note that this is not a unique index
 CREATE INDEX idx_ups_test ON upsert_test_3(part_key);
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test_3', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test_3', '4', '2');
+ create_worker_shards
+----------------------

 (1 row)

@@ -161,15 +161,15 @@ CREATE TABLE upsert_test_4
     count int
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test_4', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test_4', '4', '2');
+ create_worker_shards
+----------------------

 (1 row)

@@ -191,15 +191,15 @@ SELECT * FROM upsert_test_4;

 -- now test dropped columns
 CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
-SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('dropcol_distributed', 4, 1);
+ create_worker_shards
+----------------------

 (1 row)
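These expected outputs back the upsert tests proper, which exercise INSERT ... ON CONFLICT against hash-distributed tables. A minimal sketch of the statement shape under test, assuming part_key carries upsert_test's unique constraint as in the test schema:

    -- sketch: the conflict target is the distribution column, so the row routes to one shard
    INSERT INTO upsert_test (part_key, other_col, third_col)
    VALUES (1, 1, 1)
    ON CONFLICT (part_key)
    DO UPDATE SET other_col = upsert_test.other_col + 1;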
@@ -8,14 +8,14 @@ CREATE TABLE upsert_test
     third_col int
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
- master_create_distributed_table
+SELECT create_distributed_table('upsert_test', 'part_key', 'hash');
+ create_distributed_table
 ---------------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test', '4', '2');
- master_create_worker_shards
+SELECT create_worker_shards('upsert_test', '4', '2');
+ create_worker_shards
 -----------------------------

 (1 row)

@@ -145,14 +145,14 @@ CREATE TABLE upsert_test_2
     PRIMARY KEY (part_key, other_col)
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
- master_create_distributed_table
+SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');
+ create_distributed_table
 ---------------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test_2', '4', '2');
- master_create_worker_shards
+SELECT create_worker_shards('upsert_test_2', '4', '2');
+ create_worker_shards
 -----------------------------

 (1 row)

@@ -177,14 +177,14 @@ CREATE TABLE upsert_test_3
 -- note that this is not a unique index
 CREATE INDEX idx_ups_test ON upsert_test_3(part_key);
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
- master_create_distributed_table
+SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
+ create_distributed_table
 ---------------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test_3', '4', '2');
- master_create_worker_shards
+SELECT create_worker_shards('upsert_test_3', '4', '2');
+ create_worker_shards
 -----------------------------

 (1 row)

@@ -201,14 +201,14 @@ CREATE TABLE upsert_test_4
     count int
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
- master_create_distributed_table
+SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');
+ create_distributed_table
 ---------------------------------

 (1 row)

-SELECT master_create_worker_shards('upsert_test_4', '4', '2');
- master_create_worker_shards
+SELECT create_worker_shards('upsert_test_4', '4', '2');
+ create_worker_shards
 -----------------------------

 (1 row)

@@ -249,14 +249,14 @@ SELECT * FROM upsert_test_4;

 -- now test dropped columns
 CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
-SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
- master_create_distributed_table
+SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
+ create_distributed_table
 ---------------------------------

 (1 row)

-SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
- master_create_worker_shards
+SELECT create_worker_shards('dropcol_distributed', 4, 1);
+ create_worker_shards
 -----------------------------

 (1 row)
@@ -2,15 +2,15 @@
 -- test utility statement functionality
 -- ===================================================================
 CREATE TABLE sharded_table ( name text, id bigint );
-SELECT master_create_distributed_table('sharded_table', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('sharded_table', 'id', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

-SELECT master_create_worker_shards('sharded_table', 2, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('sharded_table', 2, 1);
+ create_worker_shards
+----------------------

 (1 row)

@@ -66,14 +66,14 @@ EXECUTE sharded_query;
 (0 rows)

 -- try to drop shards with where clause
-SELECT master_apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
+SELECT apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
 ERROR: cannot delete from distributed table
 DETAIL: Delete statements on hash-partitioned tables with where clause is not supported
 -- drop all shards
-SELECT master_apply_delete_command('DELETE FROM sharded_table');
- master_apply_delete_command
------------------------------
-                          2
+SELECT apply_delete_command('DELETE FROM sharded_table');
+ apply_delete_command
+----------------------
+                   2
 (1 row)

 -- drop table
@@ -21,7 +21,7 @@ CREATE TABLE lineitem_range (
     l_shipinstruct char(25) not null,
     l_shipmode char(10) not null,
     l_comment varchar(44) not null );
-SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
+SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range');

 \STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
 \STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
@@ -18,7 +18,7 @@ CREATE TABLE aggregate_type (
     float_value float(20) not null,
     double_value float(40) not null,
     interval_value interval not null);
-SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append');
+SELECT create_distributed_table('aggregate_type', 'float_value', 'append');

 \STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data'

@@ -25,7 +25,7 @@ CREATE TABLE lineitem_alter (
     l_shipmode char(10) not null,
     l_comment varchar(44) not null
 );
-SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
 \STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'

 -- Verify that we can add columns

@@ -43,7 +43,7 @@ FROM
     (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc
     JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
 ORDER BY attnum;
-\c - - - :master_port
+\c - - - :port

 \d lineitem_alter
 SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;

@@ -160,12 +160,12 @@ FROM
     (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc
     JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
 ORDER BY attnum;
-\c - - - :master_port
+\c - - - :port

 -- Cleanup the table and its shards
-SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
+SELECT apply_delete_command('DELETE FROM lineitem_alter');
 DROP TABLE lineitem_alter;
 -- check that nothing's left over on workers
 \c - - - :worker_1_port
 SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
-\c - - - :master_port
+\c - - - :port
@@ -7,14 +7,14 @@ CREATE TABLE multi_append_table_to_shard_right
     right_number INTEGER not null,
     right_text TEXT not null
 );
-SELECT master_create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append');
+SELECT create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append');

 CREATE TABLE multi_append_table_to_shard_left
 (
     left_number INTEGER not null,
     left_text TEXT not null
 );
-SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
+SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');

 -- Replicate 'left' table on both workers
 SELECT set_config('citus.shard_replication_factor', '2', false);

@@ -43,7 +43,7 @@ CREATE TABLE multi_append_table_to_shard_stage

 COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data';

-SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
+SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
     pg_dist_shard
 WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;

@@ -58,7 +58,7 @@ WHERE left_number = right_number;
 DELETE FROM multi_append_table_to_shard_stage;
 COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';

-SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
+SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
     pg_dist_shard
 WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;

@@ -74,9 +74,9 @@ WHERE left_number = right_number;
 UPDATE pg_dist_partition SET partmethod = 'h' WHERE
     logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid;

-SELECT master_create_empty_shard('multi_append_table_to_shard_right');
+SELECT create_empty_shard('multi_append_table_to_shard_right');

-SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
+SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
     pg_dist_shard
 WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;

@@ -85,8 +85,8 @@ UPDATE pg_dist_partition SET partmethod = 'a' WHERE
     logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid;

 -- Clean up after test
-SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right');
-SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_left');
+SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_right');
+SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_left');
 DROP TABLE multi_append_table_to_shard_stage;
 DROP TABLE multi_append_table_to_shard_right;
 DROP TABLE multi_append_table_to_shard_left;
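The append workflow above is the general recipe for loading into append-distributed tables: stage rows in a regular table, then push them into a shard's placements. A minimal sketch, assuming the renamed UDF and the master on port 57636 as in this test (the staging table name and data path are hypothetical):

    -- sketch: stage locally, then append the staged rows to one shard's placements
    CREATE TABLE staging_table (LIKE multi_append_table_to_shard_right);
    COPY staging_table FROM '/tmp/append_rows.data' WITH DELIMITER '|';
    SELECT append_table_to_shard(shardid, 'staging_table', 'localhost', 57636)
    FROM pg_dist_shard
    WHERE logicalrelid = 'multi_append_table_to_shard_right'::regclass;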
@@ -4,7 +4,7 @@ CREATE TABLE nation (
     n_name char(25) not null,
     n_regionkey integer not null,
     n_comment varchar(152));
-SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
+SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');

 \STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'

@@ -13,7 +13,7 @@ CREATE TABLE customer_delete_protocol (
     c_acctbal decimal(15,2) not null,
     c_mktsegment char(10) not null,
     c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append');
+SELECT create_distributed_table('customer_delete_protocol', 'c_custkey', 'append');

 \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
 \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'

@@ -21,33 +21,33 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey',

 -- Check that we don't support conditions on columns other than partition key.

-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_acctbal > 0.0');

 -- Check that we delete a shard if and only if all rows in the shard satisfy the condition.

-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_custkey > 6500');
 SELECT count(*) from customer_delete_protocol;

 -- Delete one shard that satisfies the given conditions.

-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_custkey > 1000 AND c_custkey < 3000');
 SELECT count(*) from customer_delete_protocol;

 -- Delete all shards if no condition is provided.

-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol');
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol');
 SELECT count(*) FROM customer_delete_protocol;

 -- Verify that empty shards are deleted if no condition is provided
-SELECT master_create_empty_shard('customer_delete_protocol');
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT create_empty_shard('customer_delete_protocol');
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_custkey > 1000');
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol');
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol');

--- Verify that master_apply_delete_command cannot be called in a transaction block
+-- Verify that apply_delete_command cannot be called in a transaction block
 BEGIN;
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol');
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol');
 ROLLBACK;
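apply_delete_command prunes whole shards rather than individual rows, which is why every restriction above must bound the partition column: a shard is dropped only when its entire [shardminvalue, shardmaxvalue] range satisfies the predicate. A minimal sketch against the table above, using this commit's renamed UDF:

    -- sketch: drops only shards whose whole key range lies above 6500
    SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 6500');
    -- with no predicate, every shard (including empty ones) is dropped
    SELECT apply_delete_command('DELETE FROM customer_delete_protocol');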
@@ -13,7 +13,7 @@ CREATE TABLE multi_outer_join_left
     l_mktsegment char(10) not null,
     l_comment varchar(117) not null
 );
-SELECT master_create_distributed_table('multi_outer_join_left', 'l_custkey', 'append');
+SELECT create_distributed_table('multi_outer_join_left', 'l_custkey', 'append');

 CREATE TABLE multi_outer_join_right
 (

@@ -26,7 +26,7 @@ CREATE TABLE multi_outer_join_right
     r_mktsegment char(10) not null,
     r_comment varchar(117) not null
 );
-SELECT master_create_distributed_table('multi_outer_join_right', 'r_custkey', 'append');
+SELECT create_distributed_table('multi_outer_join_right', 'r_custkey', 'append');

 CREATE TABLE multi_outer_join_third
 (

@@ -39,7 +39,7 @@ CREATE TABLE multi_outer_join_third
     t_mktsegment char(10) not null,
     t_comment varchar(117) not null
 );
-SELECT master_create_distributed_table('multi_outer_join_third', 't_custkey', 'append');
+SELECT create_distributed_table('multi_outer_join_third', 't_custkey', 'append');

 -- Make sure we do not crash if both tables have no shards
 SELECT

@@ -133,8 +133,8 @@ FROM
     multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey);

 -- empty tables
-SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left');
-SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');
+SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_left');
+SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_right');

 -- reload shards with 1-1 matching
 \STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|'
@@ -25,6 +25,6 @@ CREATE TABLE nation_hash_partitioned (
     n_name char(25) not null,
     n_regionkey integer not null,
     n_comment varchar(152));
-SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');
+SELECT create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');

 \STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
@@ -9,7 +9,7 @@
 SET citus.shard_max_size TO "256kB";

 CREATE TABLE large_records_table (data_id integer, data text);
-SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
+SELECT create_distributed_table('large_records_table', 'data_id', 'append');

 \STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'

@@ -22,7 +22,7 @@ CREATE TABLE lineitem_subquery (
     l_shipmode char(10) not null,
     l_comment varchar(44) not null,
     PRIMARY KEY(l_orderkey, l_linenumber) );
-SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');
+SELECT create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');

 CREATE TABLE orders_subquery (
     o_orderkey bigint not null,

@@ -35,7 +35,7 @@ CREATE TABLE orders_subquery (
     o_shippriority integer not null,
     o_comment varchar(79) not null,
     PRIMARY KEY(o_orderkey) );
-SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range');
+SELECT create_distributed_table('orders_subquery', 'o_orderkey', 'range');

 SET citus.task_executor_type TO 'task-tracker';

@@ -19,9 +19,9 @@ CREATE TABLE lineitem_range (
     l_shipinstruct char(25) not null,
     l_shipmode char(10) not null,
     l_comment varchar(44) not null );
-SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -33,9 +33,9 @@ CREATE TABLE aggregate_type (
     float_value float(20) not null,
     double_value float(40) not null,
     interval_value interval not null);
-SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -23,9 +23,9 @@ CREATE TABLE lineitem_alter (
     l_shipmode char(10) not null,
     l_comment varchar(44) not null
 );
-SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -74,7 +74,7 @@ ORDER BY attnum;
  null_column | integer
 (27 rows)

-\c - - - :master_port
+\c - - - :port
 \d lineitem_alter
         Table "public.lineitem_alter"
      Column | Type | Modifiers

@@ -451,12 +451,12 @@ ORDER BY attnum;
  ........pg.dropped.23........ | -
 (29 rows)

-\c - - - :master_port
+\c - - - :port
 -- Cleanup the table and its shards
-SELECT master_apply_delete_command('DELETE FROM lineitem_alter');
- master_apply_delete_command
------------------------------
-                          9
+SELECT apply_delete_command('DELETE FROM lineitem_alter');
+ apply_delete_command
+----------------------
+                   9
 (1 row)

 DROP TABLE lineitem_alter;

@@ -467,4 +467,4 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
 ---------
 (0 rows)

-\c - - - :master_port
+\c - - - :port
@@ -7,9 +7,9 @@ CREATE TABLE multi_append_table_to_shard_right
     right_number INTEGER not null,
     right_text TEXT not null
 );
-SELECT master_create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -18,9 +18,9 @@ CREATE TABLE multi_append_table_to_shard_left
     left_number INTEGER not null,
     left_text TEXT not null
 );
-SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -65,13 +65,13 @@ CREATE TABLE multi_append_table_to_shard_stage
     text TEXT not null
 );
 COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data';
-SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
+SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
     pg_dist_shard
 WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;
- master_append_table_to_shard
------------------------------- 
-                   0.0533333
+ append_table_to_shard
+-----------------------
+             0.0533333
 (1 row)

 -- Only the primary worker will see the new matches, as the secondary still uses a cached shard

@@ -87,13 +87,13 @@ WHERE left_number = right_number;
 -- Now add a lot of data to ensure we increase the size on disk
 DELETE FROM multi_append_table_to_shard_stage;
 COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|';
-SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
+SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
     pg_dist_shard
 WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;
- master_append_table_to_shard
------------------------------- 
-                    0.106667
+ append_table_to_shard
+-----------------------
+              0.106667
 (1 row)

 -- This join will refresh the shard on the secondary, all 8 rows in the left table will match twice (16)

@@ -109,10 +109,10 @@ WHERE left_number = right_number;
 -- Check that we error out if we try to append data to a hash partitioned table.
 UPDATE pg_dist_partition SET partmethod = 'h' WHERE
     logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid;
-SELECT master_create_empty_shard('multi_append_table_to_shard_right');
+SELECT create_empty_shard('multi_append_table_to_shard_right');
 ERROR: relation "multi_append_table_to_shard_right" is a hash partitioned table
 DETAIL: We currently don't support creating shards on hash-partitioned tables
-SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
+SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636)
 FROM
     pg_dist_shard
 WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid;

@@ -121,16 +121,16 @@ DETAIL: We currently don't support appending to shards in hash-partitioned tabl
 UPDATE pg_dist_partition SET partmethod = 'a' WHERE
     logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid;
 -- Clean up after test
-SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right');
- master_apply_delete_command
------------------------------
-                          1
+SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_right');
+ apply_delete_command
+----------------------
+                   1
 (1 row)

-SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_left');
- master_apply_delete_command
------------------------------
-                          2
+SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_left');
+ apply_delete_command
+----------------------
+                   2
 (1 row)

 DROP TABLE multi_append_table_to_shard_stage;
@@ -6,9 +6,9 @@ CREATE TABLE nation (
     n_comment varchar(152));
 NOTICE: Citus partially supports CREATE SCHEMA for distributed databases
 DETAIL: schema usage in joins and in some UDFs provided by Citus are not supported yet
-SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -11,9 +11,9 @@ CREATE TABLE customer_delete_protocol (
     c_acctbal decimal(15,2) not null,
     c_mktsegment char(10) not null,
     c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('customer_delete_protocol', 'c_custkey', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -21,16 +21,16 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey',
 \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
 \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
 -- Check that we don't support conditions on columns other than partition key.
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_acctbal > 0.0');
 ERROR: cannot delete from distributed table
 DETAIL: Where clause includes a column other than partition column
 -- Check that we delete a shard if and only if all rows in the shard satisfy the condition.
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_custkey > 6500');
- master_apply_delete_command
------------------------------ 
-                          0
+ apply_delete_command
+----------------------
+                   0
 (1 row)

 SELECT count(*) from customer_delete_protocol;

@@ -40,11 +40,11 @@ SELECT count(*) from customer_delete_protocol;
 (1 row)

 -- Delete one shard that satisfies the given conditions.
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_custkey > 1000 AND c_custkey < 3000');
- master_apply_delete_command
------------------------------ 
-                          1
+ apply_delete_command
+----------------------
+                   1
 (1 row)

 SELECT count(*) from customer_delete_protocol;

@@ -54,10 +54,10 @@ SELECT count(*) from customer_delete_protocol;
 (1 row)

 -- Delete all shards if no condition is provided.
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol');
- master_apply_delete_command
----------------------------- -
-                          2
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol');
+ apply_delete_command
+----------------------
+                   2
 (1 row)

 SELECT count(*) FROM customer_delete_protocol;

@@ -67,27 +67,27 @@ SELECT count(*) FROM customer_delete_protocol;
 (1 row)

 -- Verify that empty shards are deleted if no condition is provided
-SELECT master_create_empty_shard('customer_delete_protocol');
- master_create_empty_shard
---------------------------- 
-                    102041
+SELECT create_empty_shard('customer_delete_protocol');
+ create_empty_shard
+--------------------
+             102041
 (1 row)

-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol
     WHERE c_custkey > 1000');
- master_apply_delete_command
----------------------------- -
-                          0
+ apply_delete_command
+----------------------
+                   0
 (1 row)

-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol');
- master_apply_delete_command
----------------------------- -
-                          1
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol');
+ apply_delete_command
+----------------------
+                   1
 (1 row)

--- Verify that master_apply_delete_command cannot be called in a transaction block
+-- Verify that apply_delete_command cannot be called in a transaction block
 BEGIN;
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol');
-ERROR: master_apply_delete_command cannot run inside a transaction block
+SELECT apply_delete_command('DELETE FROM customer_delete_protocol');
+ERROR: apply_delete_command cannot run inside a transaction block
 ROLLBACK;
@@ -12,9 +12,9 @@ CREATE TABLE multi_outer_join_left
     l_mktsegment char(10) not null,
     l_comment varchar(117) not null
 );
-SELECT master_create_distributed_table('multi_outer_join_left', 'l_custkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('multi_outer_join_left', 'l_custkey', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -29,9 +29,9 @@ CREATE TABLE multi_outer_join_right
     r_mktsegment char(10) not null,
     r_comment varchar(117) not null
 );
-SELECT master_create_distributed_table('multi_outer_join_right', 'r_custkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('multi_outer_join_right', 'r_custkey', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -46,9 +46,9 @@ CREATE TABLE multi_outer_join_third
     t_mktsegment char(10) not null,
     t_comment varchar(117) not null
 );
-SELECT master_create_distributed_table('multi_outer_join_third', 't_custkey', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('multi_outer_join_third', 't_custkey', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -172,16 +172,16 @@ FROM
 ERROR: cannot perform distributed planning on this query
 DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning
 -- empty tables
-SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left');
- master_apply_delete_command
----------------------------- -
-                          2
+SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_left');
+ apply_delete_command
+----------------------
+                   2
 (1 row)

-SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right');
- master_apply_delete_command
----------------------------- -
-                          2
+SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_right');
+ apply_delete_command
+----------------------
+                   2
 (1 row)

 -- reload shards with 1-1 matching
@@ -19,9 +19,9 @@ CREATE TABLE nation_hash_partitioned (
     n_name char(25) not null,
     n_regionkey integer not null,
     n_comment varchar(152));
-SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -6,9 +6,9 @@
 -- are creating shards of correct size even when records are large.
 SET citus.shard_max_size TO "256kB";
 CREATE TABLE large_records_table (data_id integer, data text);
-SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('large_records_table', 'data_id', 'append');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -20,9 +20,9 @@ CREATE TABLE lineitem_subquery (
     l_shipmode char(10) not null,
     l_comment varchar(44) not null,
     PRIMARY KEY(l_orderkey, l_linenumber) );
-SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('lineitem_subquery', 'l_orderkey', 'range');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -37,9 +37,9 @@ CREATE TABLE orders_subquery (
     o_shippriority integer not null,
     o_comment varchar(79) not null,
     PRIMARY KEY(o_orderkey) );
-SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('orders_subquery', 'o_orderkey', 'range');
+ create_distributed_table
+--------------------------

 (1 row)

@@ -133,7 +133,7 @@ sysopen my $fh, "tmp_check/tmp-bin/psql", O_CREAT|O_TRUNC|O_RDWR, 0700
     or die "Could not create psql wrapper";
 print $fh "#!/bin/bash\n";
 print $fh "exec $bindir/csql ";
-print $fh "--variable=master_port=$masterPort ";
+print $fh "--variable=port=$masterPort ";
 for my $workeroff (0 .. $#workerPorts)
 {
     my $port = $workerPorts[$workeroff];
@@ -4,7 +4,7 @@

 -- Try binary master copy for different executors

-SET citus.binary_master_copy_format TO 'on';
+SET citus.binary_copy_format TO 'on';
 SET citus.task_executor_type TO 'task-tracker';

 SELECT count(*) FROM lineitem;
@@ -47,8 +47,8 @@ CREATE TABLE insert_target (
 -- squelch WARNINGs that contain worker_port
 SET client_min_messages TO ERROR;

-SELECT master_create_distributed_table('insert_target', 'id', 'hash');
-SELECT master_create_worker_shards('insert_target', 2, 1);
+SELECT create_distributed_table('insert_target', 'id', 'hash');
+SELECT create_worker_shards('insert_target', 2, 1);

 CREATE TEMPORARY SEQUENCE rows_inserted;
 SELECT create_insert_proxy_for_table('insert_target', 'rows_inserted') AS proxy_tablename
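Note: create_insert_proxy_for_table returns the name of a proxy table whose INSERTs are forwarded to the distributed table while advancing the given sequence, letting the test count routed rows. A hedged sketch of the continuation (the \gset capture and the follow-up statements are assumed, not part of this hunk):

SELECT create_insert_proxy_for_table('insert_target', 'rows_inserted') AS proxy_tablename
\gset
INSERT INTO :"proxy_tablename" (id) VALUES (1);
SELECT currval('rows_inserted');  -- rows routed through the proxy so far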
@@ -48,36 +48,36 @@ CREATE TABLE table_to_distribute (
 );

 -- use an index instead of table name
-SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
+SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash');

 -- use a bad column name
-SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash');

 -- use unrecognized partition type
-SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized');
+SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized');

 -- use a partition column of a type lacking any default operator class
-SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash');

 -- use a partition column of type lacking the required support function (hash)
-SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');

 -- distribute table and inspect side effects
-SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'name', 'hash');
 SELECT partmethod, partkey FROM pg_dist_partition
 WHERE logicalrelid = 'table_to_distribute'::regclass;

 -- use a bad shard count
-SELECT master_create_worker_shards('table_to_distribute', 0, 1);
+SELECT create_worker_shards('table_to_distribute', 0, 1);

 -- use a bad replication factor
-SELECT master_create_worker_shards('table_to_distribute', 16, 0);
+SELECT create_worker_shards('table_to_distribute', 16, 0);

 -- use a replication factor higher than shard count
-SELECT master_create_worker_shards('table_to_distribute', 16, 3);
+SELECT create_worker_shards('table_to_distribute', 16, 3);

 -- finally, create shards and inspect metadata
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
+SELECT create_worker_shards('table_to_distribute', 16, 1);

 SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard
 WHERE logicalrelid = 'table_to_distribute'::regclass
@@ -93,7 +93,7 @@ SELECT count(*) AS shard_count,
 SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r';

 -- try to create them again
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
+SELECT create_worker_shards('table_to_distribute', 16, 1);

 -- test list sorting
 SELECT sort_names('sumedh', 'jason', 'ozgun');
@@ -108,8 +108,8 @@ CREATE FOREIGN TABLE foreign_table_to_distribute
 )
 SERVER fake_fdw_server;

-SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
-SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1);
+SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
+SELECT create_worker_shards('foreign_table_to_distribute', 16, 1);

 SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard
 WHERE logicalrelid = 'foreign_table_to_distribute'::regclass
@@ -122,8 +122,8 @@ CREATE TABLE weird_shard_count
 id bigint
 );

-SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash');
-SELECT master_create_worker_shards('weird_shard_count', 7, 1);
+SELECT create_distributed_table('weird_shard_count', 'id', 'hash');
+SELECT create_worker_shards('weird_shard_count', 7, 1);

 -- Citus ensures all shards are roughly the same size
 SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size
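Note: for hash partitioning the shard ranges carve up the 32-bit hash token space, so 7 shards should each cover about 4294967296 / 7 tokens. A worked check of the expected per-shard size (arithmetic only, not from the test):

-- 2^32 hash values split across 7 shards: 4294967296 / 7 = 613566756
SELECT ((2147483647::bigint - (-2147483648)::bigint) + 1) / 7 AS expected_shard_size;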
@@ -23,7 +23,7 @@ CREATE TABLE lineitem (
 l_shipmode char(10) not null,
 l_comment varchar(44) not null,
 PRIMARY KEY(l_orderkey, l_linenumber) );
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');

 CREATE INDEX lineitem_time_index ON lineitem (l_shipdate);

@@ -38,7 +38,7 @@ CREATE TABLE orders (
 o_shippriority integer not null,
 o_comment varchar(79) not null,
 PRIMARY KEY(o_orderkey) );
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');

 CREATE TABLE customer (
 c_custkey integer not null,
@@ -49,14 +49,14 @@ CREATE TABLE customer (
 c_acctbal decimal(15,2) not null,
 c_mktsegment char(10) not null,
 c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
+SELECT create_distributed_table('customer', 'c_custkey', 'append');

 CREATE TABLE nation (
 n_nationkey integer not null,
 n_name char(25) not null,
 n_regionkey integer not null,
 n_comment varchar(152));
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');

 CREATE TABLE part (
 p_partkey integer not null,
@@ -68,7 +68,7 @@ CREATE TABLE part (
 p_container char(10) not null,
 p_retailprice decimal(15,2) not null,
 p_comment varchar(23) not null);
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
+SELECT create_distributed_table('part', 'p_partkey', 'append');

 CREATE TABLE supplier
 (
@@ -80,7 +80,7 @@ CREATE TABLE supplier
 s_acctbal decimal(15,2) not null,
 s_comment varchar(101) not null
 );
-SELECT master_create_distributed_table('supplier', 's_suppkey', 'append');
+SELECT create_distributed_table('supplier', 's_suppkey', 'append');


 -- now test that Citus cannot distribute unique constraints that do not include
@@ -90,14 +90,14 @@ CREATE TABLE primary_key_on_non_part_col
 partition_col integer,
 other_col integer PRIMARY KEY
 );
-SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');

 CREATE TABLE unique_const_on_non_part_col
 (
 partition_col integer,
 other_col integer UNIQUE
 );
-SELECT master_create_distributed_table('unique_const_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('unique_const_on_non_part_col', 'partition_col', 'hash');

 -- now show that Citus can distribute unique constraints that include
 -- the partition column
@@ -106,14 +106,14 @@ CREATE TABLE primary_key_on_part_col
 partition_col integer PRIMARY KEY,
 other_col integer
 );
-SELECT master_create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');

 CREATE TABLE unique_const_on_part_col
 (
 partition_col integer UNIQUE,
 other_col integer
 );
-SELECT master_create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');

 CREATE TABLE unique_const_on_two_columns
 (
@@ -121,21 +121,21 @@ CREATE TABLE unique_const_on_two_columns
 other_col integer,
 UNIQUE (partition_col, other_col)
 );
-SELECT master_create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');
+SELECT create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');

 CREATE TABLE unique_const_append_partitioned_tables
 (
 partition_col integer UNIQUE,
 other_col integer
 );
-SELECT master_create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');
+SELECT create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');

 CREATE TABLE unique_const_range_partitioned_tables
 (
 partition_col integer UNIQUE,
 other_col integer
 );
-SELECT master_create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');
+SELECT create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');

 -- drop unnecessary tables
 DROP TABLE primary_key_on_non_part_col, unique_const_on_non_part_col CASCADE;
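Note: shards enforce constraints locally, so a UNIQUE or PRIMARY KEY is only globally meaningful when it covers the partition column; every value of such a key then lands on exactly one shard. A minimal sketch with assumed names:

-- allowed: the key is the partition column, so per-shard enforcement
-- is also global enforcement
CREATE TABLE accounts (account_id int PRIMARY KEY, balance int);
SELECT create_distributed_table('accounts', 'account_id', 'hash');
SELECT create_worker_shards('accounts', 4, 1);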
@@ -54,9 +54,9 @@ CREATE TABLE composite_type_partitioned_table
 col test_composite_type
 );

-SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
+SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');

-SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1);
+SELECT create_worker_shards('composite_type_partitioned_table', 4, 1);

 -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
 INSERT INTO composite_type_partitioned_table VALUES (1, '(1, 2)'::test_composite_type);
@@ -80,9 +80,9 @@ CREATE TABLE bugs (
 status bug_status
 );

-SELECT master_create_distributed_table('bugs', 'status', 'hash');
+SELECT create_distributed_table('bugs', 'status', 'hash');

-SELECT master_create_worker_shards('bugs', 4, 1);
+SELECT create_worker_shards('bugs', 4, 1);

 -- execute INSERT, SELECT and UPDATE queries on bugs
 INSERT INTO bugs VALUES (1, 'new');
@@ -104,8 +104,8 @@ CREATE TABLE varchar_hash_partitioned_table
 name varchar
 );

-SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
-SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1);
+SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
+SELECT create_worker_shards('varchar_hash_partitioned_table', 4, 1);

 -- execute INSERT, SELECT and UPDATE queries on varchar_hash_partitioned_table
 INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason');
@@ -28,7 +28,7 @@ CREATE FOREIGN TABLE lineitem (
 l_comment varchar(44) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');

 CREATE FOREIGN TABLE orders (
 o_orderkey bigint not null,
@@ -42,7 +42,7 @@ CREATE FOREIGN TABLE orders (
 o_comment varchar(79) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');

 CREATE FOREIGN TABLE customer (
 c_custkey integer not null,
@@ -55,7 +55,7 @@ CREATE FOREIGN TABLE customer (
 c_comment varchar(117) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
+SELECT create_distributed_table('customer', 'c_custkey', 'append');

 CREATE FOREIGN TABLE nation (
 n_nationkey integer not null,
@@ -64,7 +64,7 @@ CREATE FOREIGN TABLE nation (
 n_comment varchar(152))
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');

 CREATE FOREIGN TABLE part (
 p_partkey integer not null,
@@ -78,4 +78,4 @@ CREATE FOREIGN TABLE part (
 p_comment varchar(23) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
+SELECT create_distributed_table('part', 'p_partkey', 'append');
@@ -5,10 +5,10 @@
 -- Tests that check the metadata returned by the master node.

 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-part_placement_policy FROM master_get_table_metadata('lineitem');
+part_placement_policy FROM get_table_metadata('lineitem');

-SELECT * FROM master_get_table_ddl_events('lineitem');
+SELECT * FROM get_table_ddl_events('lineitem');

-SELECT * FROM master_get_new_shardid();
+SELECT * FROM get_new_shardid();

-SELECT node_name FROM master_get_local_first_candidate_nodes();
+SELECT node_name FROM get_local_first_candidate_nodes();
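Note: these metadata UDFs pair naturally with psql's \gset. A sketch (assumed usage, not part of the diff) that reserves a shardId and echoes it back:

SELECT get_new_shardid() AS reserved_shardid
\gset
SELECT :reserved_shardid;  -- the unique shardId just reserved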
@@ -19,7 +19,7 @@ CREATE TABLE orders_hash_partitioned (
 o_clerk char(15),
 o_shippriority integer,
 o_comment varchar(79) );
-SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');

 UPDATE pg_dist_partition SET partmethod = 'h'
 WHERE logicalrelid = 'orders_hash_partitioned'::regclass;
@@ -12,18 +12,18 @@
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102080;

 CREATE TABLE index_test_range(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_range', 'a', 'range');
-SELECT master_create_empty_shard('index_test_range');
-SELECT master_create_empty_shard('index_test_range');
+SELECT create_distributed_table('index_test_range', 'a', 'range');
+SELECT create_empty_shard('index_test_range');
+SELECT create_empty_shard('index_test_range');

 CREATE TABLE index_test_hash(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
-SELECT master_create_worker_shards('index_test_hash', 8, 2);
+SELECT create_distributed_table('index_test_hash', 'a', 'hash');
+SELECT create_worker_shards('index_test_hash', 8, 2);

 CREATE TABLE index_test_append(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_append', 'a', 'append');
-SELECT master_create_empty_shard('index_test_append');
-SELECT master_create_empty_shard('index_test_append');
+SELECT create_distributed_table('index_test_append', 'a', 'append');
+SELECT create_empty_shard('index_test_append');
+SELECT create_empty_shard('index_test_append');

 --
 -- CREATE INDEX
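Note: a CREATE INDEX issued on a distributed table is expected to be propagated to every shard, which the per-table counts in the next hunk verify on the workers. A sketch with an assumed index name:

-- one statement on the coordinator, one index per shard on the workers
CREATE INDEX index_test_hash_b_idx ON index_test_hash (b);
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';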
@@ -53,7 +53,7 @@ SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class
 SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
 SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
 SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
-\c - - - :master_port
+\c - - - :port

 -- Verify that we error out on unsupported statement types

@@ -112,7 +112,7 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
 \c - - - :worker_1_port
 SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
 SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
-\c - - - :master_port
+\c - - - :port

 -- Drop created tables
 DROP TABLE index_test_range;
@@ -5,16 +5,16 @@
 -- Tests that check the metadata returned by the master node.

 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-part_placement_policy FROM master_get_table_metadata('lineitem');
+part_placement_policy FROM get_table_metadata('lineitem');

-SELECT * FROM master_get_table_ddl_events('lineitem');
+SELECT * FROM get_table_ddl_events('lineitem');

-SELECT * FROM master_get_new_shardid();
+SELECT * FROM get_new_shardid();

-SELECT * FROM master_get_local_first_candidate_nodes();
+SELECT * FROM get_local_first_candidate_nodes();

-SELECT * FROM master_get_round_robin_candidate_nodes(1);
+SELECT * FROM get_round_robin_candidate_nodes(1);

-SELECT * FROM master_get_round_robin_candidate_nodes(2);
+SELECT * FROM get_round_robin_candidate_nodes(2);

-SELECT * FROM master_get_active_worker_nodes();
+SELECT * FROM get_active_worker_nodes();
@@ -17,36 +17,36 @@ CREATE TABLE insufficient_shards ( LIKE limit_orders );
 CREATE TABLE range_partitioned ( LIKE limit_orders );
 CREATE TABLE append_partitioned ( LIKE limit_orders );

-SELECT master_create_distributed_table('limit_orders', 'id', 'hash');
-SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash');
-SELECT master_create_distributed_table('range_partitioned', 'id', 'range');
-SELECT master_create_distributed_table('append_partitioned', 'id', 'append');
+SELECT create_distributed_table('limit_orders', 'id', 'hash');
+SELECT create_distributed_table('insufficient_shards', 'id', 'hash');
+SELECT create_distributed_table('range_partitioned', 'id', 'range');
+SELECT create_distributed_table('append_partitioned', 'id', 'append');

-SELECT master_create_worker_shards('limit_orders', 2, 2);
+SELECT create_worker_shards('limit_orders', 2, 2);

 -- make a single shard that covers no partition values
-SELECT master_create_worker_shards('insufficient_shards', 1, 1);
+SELECT create_worker_shards('insufficient_shards', 1, 1);
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0
 WHERE logicalrelid = 'insufficient_shards'::regclass;

 -- create range-partitioned shards
-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 49999
 WHERE shardid = :new_shard_id;

-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 50000, shardmaxvalue = 99999
 WHERE shardid = :new_shard_id;

 -- create append-partitioned shards
-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000
 WHERE shardid = :new_shard_id;

-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000
 WHERE shardid = :new_shard_id;
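Note: with these bounds written into pg_dist_shard, an equality filter on the range-distributed table should prune to exactly one shard. A sketch of the kind of query this setup enables (assumed, not part of the hunk):

-- id = 12345 falls inside the [0, 49999] shard only
SELECT count(*) FROM range_partitioned WHERE id = 12345;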
@@ -36,7 +36,7 @@ CREATE TABLE varchar_partitioned_table
 (
 varchar_column varchar(100)
 );
-SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
+SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');

 -- Create logical shards and shard placements with shardid 100,101

@@ -65,7 +65,7 @@ CREATE TABLE array_partitioned_table
 (
 array_column text[]
 );
-SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append');
+SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append');
 SET client_min_messages TO DEBUG2;

 -- Create logical shard with shardid 102, 103
@@ -103,7 +103,7 @@ CREATE TABLE composite_partitioned_table
 (
 composite_column composite_type
 );
-SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
+SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
 SET client_min_messages TO DEBUG2;

 -- Create logical shard with shardid 104, 105
@@ -11,10 +11,10 @@ CREATE INDEX ON customer_engagements (created_at);
 CREATE INDEX ON customer_engagements (event_data);

 -- distribute the table
-SELECT master_create_distributed_table('customer_engagements', 'id', 'hash');
+SELECT create_distributed_table('customer_engagements', 'id', 'hash');

 -- create a single shard on the first worker
-SELECT master_create_worker_shards('customer_engagements', 1, 2);
+SELECT create_worker_shards('customer_engagements', 1, 2);

 -- ingest some data for the tests
 INSERT INTO customer_engagements VALUES (1, '01-01-2015', 'first event');
@@ -25,7 +25,7 @@ INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event');
 -- (i) create a new shard
 -- (ii) mark the second shard placements as unhealthy
 -- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones
--- (iv) do a successful master_copy_shard_placement from the first placement to the second
+-- (iv) do a successful copy_shard_placement from the first placement to the second
 -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement

 -- get the newshardid
@@ -39,13 +39,13 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :newshardid AN
 INSERT INTO pg_dist_shard_placement (nodename, nodeport, shardid, shardstate, shardlength)
 VALUES ('dummyhost', :worker_2_port, :newshardid, 1, 0);

-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port);

 -- also try to copy from an inactive placement
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port);

 -- "copy" this shard from the first placement to the second one
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);

 -- now, update first placement as unhealthy (and raise a notice) so that queries are not routed to there
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :newshardid AND nodeport = :worker_1_port;
@@ -61,10 +61,10 @@ CREATE FOREIGN TABLE remote_engagements (
 ) SERVER fake_fdw_server;

 -- distribute the table
-SELECT master_create_distributed_table('remote_engagements', 'id', 'hash');
+SELECT create_distributed_table('remote_engagements', 'id', 'hash');

 -- create a single shard on the first worker
-SELECT master_create_worker_shards('remote_engagements', 1, 2);
+SELECT create_worker_shards('remote_engagements', 1, 2);

 -- get the newshardid
 SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remote_engagements'::regclass
@@ -74,4 +74,4 @@ SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remo
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND nodeport = :worker_2_port;

 -- oops! we don't support repairing shards backed by foreign tables
-SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
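Note: the repair flow these hunks exercise is: mark a placement inactive (shardstate = 3) in pg_dist_shard_placement, then copy from a healthy placement with the renamed copy_shard_placement. A condensed sketch with assumed shard id and ports:

UPDATE pg_dist_shard_placement
SET shardstate = 3  -- 3 marks the placement inactive
WHERE shardid = :shardid AND nodeport = :target_port;
SELECT copy_shard_placement(:shardid,
                            'localhost', :source_port,   -- healthy placement
                            'localhost', :target_port);  -- placement to repair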
@@ -3,7 +3,7 @@
 --

 -- Create UDF in master and workers
-\c - - - :master_port
+\c - - - :port
 DROP FUNCTION IF EXISTS median(double precision[]);

 CREATE FUNCTION median(double precision[]) RETURNS double precision
@@ -37,7 +37,7 @@ LANGUAGE sql IMMUTABLE AS $_$
 $_$;

 -- Run query on master
-\c - - - :master_port
+\c - - - :port

 SET citus.task_executor_type TO 'task-tracker';

@@ -15,15 +15,15 @@ CREATE TABLE authors ( name text, id bigint );
 -- this table is used in router executor tests
 CREATE TABLE articles_single_shard (LIKE articles);

-SELECT master_create_distributed_table('articles', 'author_id', 'hash');
-SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash');
+SELECT create_distributed_table('articles', 'author_id', 'hash');
+SELECT create_distributed_table('articles_single_shard', 'author_id', 'hash');


 -- test when a table is distributed but no shards created yet
 SELECT count(*) from articles;

-SELECT master_create_worker_shards('articles', 2, 1);
-SELECT master_create_worker_shards('articles_single_shard', 1, 1);
+SELECT create_worker_shards('articles', 2, 1);
+SELECT create_worker_shards('articles_single_shard', 1, 1);

 -- create a bunch of test data
 INSERT INTO articles VALUES ( 1, 1, 'arsenous', 9572);
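Note: articles is hash-distributed on author_id, so a query filtering on a single author_id can be sent to one shard by the router executor. A sketch of such a query (assumed, not from the hunk):

-- routes to the single shard that owns hash(author_id = 1)
SELECT count(*) FROM articles WHERE author_id = 1;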
@@ -4,7 +4,7 @@
 -- Tests around changing the schema and dropping of a distributed table

 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');

 -- verify that the citus extension can't be dropped while distributed tables exist
 DROP EXTENSION citus;
@@ -25,8 +25,8 @@ DROP TABLE testtableddl;

 -- verify that the table can be dropped even if shards exist
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
-SELECT 1 FROM master_create_empty_shard('testtableddl');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT 1 FROM create_empty_shard('testtableddl');
 DROP TABLE testtableddl;

 -- ensure no metadata of distributed tables remains
@@ -8,7 +8,7 @@
 -- and check that tasks are assigned to worker nodes as expected.

 CREATE TABLE task_assignment_test_table (test_id integer);
-SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
+SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');

 -- Create logical shards with shardids 200, 201, and 202

@@ -10,8 +10,8 @@ CREATE TABLE upsert_test
 );

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test', '4', '2');
+SELECT create_distributed_table('upsert_test', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test', '4', '2');

 -- do a regular insert
 INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1);
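Note: upsert_test is hash-distributed on part_key, so an INSERT ... ON CONFLICT on that key is routed to a single shard and applied to both of its replicas. A sketch of the upsert shape the rest of this file exercises:

INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1)
ON CONFLICT (part_key) DO UPDATE SET other_col = upsert_test.other_col + 1;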
@@ -90,8 +90,8 @@ CREATE TABLE upsert_test_2
 );

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_2', '4', '2');
+SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test_2', '4', '2');

 -- now show that Citus works with multiple columns as the PRIMARY KEY, including the partition key
 INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1);
@@ -111,8 +111,8 @@ CREATE TABLE upsert_test_3
 CREATE INDEX idx_ups_test ON upsert_test_3(part_key);

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_3', '4', '2');
+SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test_3', '4', '2');

 -- since there are no unique indexes, error-out
 INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1;
@@ -125,8 +125,8 @@ CREATE TABLE upsert_test_4
 );

 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_4', '4', '2');
+SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test_4', '4', '2');

 -- a single row insert
 INSERT INTO upsert_test_4 VALUES (1, 0);
@@ -144,8 +144,8 @@ SELECT * FROM upsert_test_4;

 -- now test dropped columns
 CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
-SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
-SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
+SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
+SELECT create_worker_shards('dropcol_distributed', 4, 1);

 INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key)
 DO UPDATE SET keep1 = dropcol.keep1;
@@ -3,8 +3,8 @@
 -- ===================================================================

 CREATE TABLE sharded_table ( name text, id bigint );
-SELECT master_create_distributed_table('sharded_table', 'id', 'hash');
-SELECT master_create_worker_shards('sharded_table', 2, 1);
+SELECT create_distributed_table('sharded_table', 'id', 'hash');
+SELECT create_worker_shards('sharded_table', 2, 1);

 -- COPY out is supported with distributed tables
 COPY sharded_table TO STDOUT;
@@ -37,10 +37,10 @@ EXECUTE sharded_delete;
 EXECUTE sharded_query;

 -- try to drop shards with where clause
-SELECT master_apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
+SELECT apply_delete_command('DELETE FROM sharded_table WHERE id > 0');

 -- drop all shards
-SELECT master_apply_delete_command('DELETE FROM sharded_table');
+SELECT apply_delete_command('DELETE FROM sharded_table');

 -- drop table
 DROP TABLE sharded_table;