From 2c73c7695a269b804bdbd1001ac621e134aa64f6 Mon Sep 17 00:00:00 2001
From: Marco Slot
Date: Fri, 1 Apr 2016 19:43:14 +0200
Subject: [PATCH] Make Citus masterless

---
 src/backend/distributed/citus.sql | 88 +++++++++----------
 .../commands/create_distributed_table.c | 6 +-
 .../distributed/master/master_create_shards.c | 10 +--
 .../master/master_delete_protocol.c | 16 ++--
 .../distributed/master/master_node_protocol.c | 36 ++++----
 .../distributed/master/master_repair_shards.c | 6 +-
 .../master/master_stage_protocol.c | 16 ++--
 .../distributed/planner/modify_planner.c | 2 +-
 src/backend/distributed/shared_library_init.c | 2 +-
 .../distributed/test/distribution_metadata.c | 2 +-
 .../distributed/utils/metadata_cache.c | 12 +--
 src/bin/csql/stage.h | 10 +--
 src/include/distributed/master_protocol.h | 24 ++---
 src/include/distributed/worker_protocol.h | 2 +-
 .../multi_binary_master_copy_format.out | 2 +-
 .../expected/multi_create_insert_proxy.out | 12 +--
 .../regress/expected/multi_create_shards.out | 54 ++++++------
 .../regress/expected/multi_create_table.out | 70 +++++++--------
 .../regress/expected/multi_data_types.out | 36 ++++----
 .../expected/multi_fdw_create_table.out | 30 +++----
 .../expected/multi_fdw_master_protocol.out | 16 ++--
 .../regress/expected/multi_hash_pruning.out | 6 +-
 .../expected/multi_index_statements.out | 60 ++++++-------
 .../expected/multi_master_protocol.out | 22 ++---
 .../regress/expected/multi_modifications.out | 44 +++++-----
 .../expected/multi_partition_pruning.out | 18 ++--
 .../regress/expected/multi_repair_shards.out | 38 ++++----
 .../multi_repartitioned_subquery_udf.out | 4 +-
 .../regress/expected/multi_simple_queries.out | 24 ++---
 src/test/regress/expected/multi_table_ddl.out | 16 ++--
 .../expected/multi_task_assignment_policy.out | 6 +-
 src/test/regress/expected/multi_upsert.out | 60 ++++++-------
 src/test/regress/expected/multi_upsert_0.out | 40 ++++-----
 src/test/regress/expected/multi_utilities.out | 22 ++---
 .../regress/input/multi_agg_distinct.source | 2 +-
 .../input/multi_agg_type_conversion.source | 2 +-
 .../input/multi_alter_table_statements.source | 10 +--
 .../input/multi_append_table_to_shard.source | 16 ++--
 .../regress/input/multi_create_schema.source | 2 +-
 .../input/multi_master_delete_protocol.source | 20 ++---
 .../regress/input/multi_outer_join.source | 10 +--
 .../regress/input/multi_stage_data.source | 2 +-
 .../input/multi_stage_large_records.source | 2 +-
 src/test/regress/input/multi_subquery.source | 4 +-
 .../regress/output/multi_agg_distinct.source | 6 +-
 .../output/multi_agg_type_conversion.source | 6 +-
 .../multi_alter_table_statements.source | 20 ++---
 .../output/multi_append_table_to_shard.source | 48 +++++-----
 .../regress/output/multi_create_schema.source | 6 +-
 .../multi_master_delete_protocol.source | 62 ++++++------
 .../regress/output/multi_outer_join.source | 34 +++----
 .../regress/output/multi_stage_data.source | 6 +-
 .../output/multi_stage_large_records.source | 6 +-
 src/test/regress/output/multi_subquery.source | 12 +--
 src/test/regress/pg_regress_multi.pl | 2 +-
 .../sql/multi_binary_master_copy_format.sql | 2 +-
 .../regress/sql/multi_create_insert_proxy.sql | 4 +-
 src/test/regress/sql/multi_create_shards.sql | 30 +++---
 src/test/regress/sql/multi_create_table.sql | 26 +++---
 src/test/regress/sql/multi_data_types.sql | 12 +--
 .../regress/sql/multi_fdw_create_table.sql | 10 +--
 .../regress/sql/multi_fdw_master_protocol.sql | 8 +-
 src/test/regress/sql/multi_hash_pruning.sql | 2 +-
 .../regress/sql/multi_index_statements.sql | 20 ++---
 .../regress/sql/multi_master_protocol.sql | 14 +--
 src/test/regress/sql/multi_modifications.sql | 20 ++---
 .../regress/sql/multi_partition_pruning.sql | 6 +-
 src/test/regress/sql/multi_repair_shards.sql | 18 ++--
 .../sql/multi_repartitioned_subquery_udf.sql | 4 +-
 src/test/regress/sql/multi_simple_queries.sql | 8 +-
 src/test/regress/sql/multi_table_ddl.sql | 6 +-
 .../sql/multi_task_assignment_policy.sql | 2 +-
 src/test/regress/sql/multi_upsert.sql | 20 ++---
 src/test/regress/sql/multi_utilities.sql | 8 +-
 74 files changed, 655 insertions(+), 655 deletions(-)

diff --git a/src/backend/distributed/citus.sql b/src/backend/distributed/citus.sql
index 9fb23bb7f..0c44de4fa 100644
--- a/src/backend/distributed/citus.sql
+++ b/src/backend/distributed/citus.sql
@@ -100,7 +100,7 @@ SET search_path = 'pg_catalog';
 
 /* master_* functions */
 
-CREATE FUNCTION master_get_table_metadata(relation_name text, OUT logical_relid oid,
+CREATE FUNCTION get_table_metadata(relation_name text, OUT logical_relid oid,
                                           OUT part_storage_type "char",
                                           OUT part_method "char", OUT part_key text,
                                           OUT part_replica_count integer,
@@ -108,91 +108,91 @@ CREATE FUNCTION master_get_table_metadata(relation_name text, OUT logical_relid
                                           OUT part_placement_policy integer)
     RETURNS record
     LANGUAGE C STABLE STRICT
-    AS 'MODULE_PATHNAME', $$master_get_table_metadata$$;
-COMMENT ON FUNCTION master_get_table_metadata(relation_name text)
+    AS 'MODULE_PATHNAME', $$get_table_metadata$$;
+COMMENT ON FUNCTION get_table_metadata(relation_name text)
     IS 'fetch metadata values for the table';
 
-CREATE FUNCTION master_get_table_ddl_events(text)
+CREATE FUNCTION get_table_ddl_events(text)
     RETURNS SETOF text
     LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_table_ddl_events$$;
-COMMENT ON FUNCTION master_get_table_ddl_events(text)
+    AS 'MODULE_PATHNAME', $$get_table_ddl_events$$;
+COMMENT ON FUNCTION get_table_ddl_events(text)
     IS 'fetch set of ddl statements for the table';
 
-CREATE FUNCTION master_get_new_shardid()
+CREATE FUNCTION get_new_shardid()
     RETURNS bigint
     LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_get_new_shardid$$;
-COMMENT ON FUNCTION master_get_new_shardid()
+    AS 'MODULE_PATHNAME', $$get_new_shardid$$;
+COMMENT ON FUNCTION get_new_shardid()
     IS 'fetch unique shardId';
 
-CREATE FUNCTION master_get_local_first_candidate_nodes(OUT node_name text,
+CREATE FUNCTION get_local_first_candidate_nodes(OUT node_name text,
                                                        OUT node_port bigint)
     RETURNS SETOF record
     LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_local_first_candidate_nodes$$;
-COMMENT ON FUNCTION master_get_local_first_candidate_nodes()
+    AS 'MODULE_PATHNAME', $$get_local_first_candidate_nodes$$;
+COMMENT ON FUNCTION get_local_first_candidate_nodes()
     IS 'fetch set of candidate nodes for shard uploading choosing the local node first';
 
-CREATE FUNCTION master_create_empty_shard(text)
+CREATE FUNCTION create_empty_shard(text)
     RETURNS bigint
    LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_create_empty_shard$$;
-COMMENT ON FUNCTION master_create_empty_shard(text)
+    AS 'MODULE_PATHNAME', $$create_empty_shard$$;
+COMMENT ON FUNCTION create_empty_shard(text)
     IS 'create an empty shard and shard placements for the table';
 
-CREATE FUNCTION master_append_table_to_shard(bigint, text, text, integer)
+CREATE FUNCTION append_table_to_shard(bigint, text, text, integer)
     RETURNS real
     LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_append_table_to_shard$$;
-COMMENT ON FUNCTION master_append_table_to_shard(bigint, text, text, integer)
+    AS 'MODULE_PATHNAME', $$append_table_to_shard$$;
+COMMENT ON FUNCTION append_table_to_shard(bigint, text, text, integer)
     IS 'append given table to all shard placements and update metadata';
 
-CREATE FUNCTION master_drop_all_shards(logicalrelid regclass,
+CREATE FUNCTION drop_all_shards(logicalrelid regclass,
                                        schema_name text,
                                        table_name text)
     RETURNS integer
     LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_drop_all_shards$$;
-COMMENT ON FUNCTION master_drop_all_shards(regclass, text, text)
+    AS 'MODULE_PATHNAME', $$drop_all_shards$$;
+COMMENT ON FUNCTION drop_all_shards(regclass, text, text)
     IS 'drop all shards in a relation and update metadata';
 
-CREATE FUNCTION master_apply_delete_command(text)
+CREATE FUNCTION apply_delete_command(text)
     RETURNS integer
     LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_apply_delete_command$$;
-COMMENT ON FUNCTION master_apply_delete_command(text)
+    AS 'MODULE_PATHNAME', $$apply_delete_command$$;
+COMMENT ON FUNCTION apply_delete_command(text)
    IS 'drop shards matching delete criteria and update metadata';
 
-CREATE FUNCTION master_get_active_worker_nodes(OUT node_name text, OUT node_port bigint)
+CREATE FUNCTION get_active_worker_nodes(OUT node_name text, OUT node_port bigint)
     RETURNS SETOF record
     LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_active_worker_nodes$$;
-COMMENT ON FUNCTION master_get_active_worker_nodes()
+    AS 'MODULE_PATHNAME', $$get_active_worker_nodes$$;
+COMMENT ON FUNCTION get_active_worker_nodes()
     IS 'fetch set of active worker nodes';
 
-CREATE FUNCTION master_get_round_robin_candidate_nodes(shard_id bigint,
+CREATE FUNCTION get_round_robin_candidate_nodes(shard_id bigint,
                                                        OUT node_name text,
                                                        OUT node_port bigint)
     RETURNS SETOF record
     LANGUAGE C STRICT ROWS 100
-    AS 'MODULE_PATHNAME', $$master_get_round_robin_candidate_nodes$$;
-COMMENT ON FUNCTION master_get_round_robin_candidate_nodes(shard_id bigint)
+    AS 'MODULE_PATHNAME', $$get_round_robin_candidate_nodes$$;
+COMMENT ON FUNCTION get_round_robin_candidate_nodes(shard_id bigint)
     IS 'fetch set of candidate nodes for shard uploading in round-robin manner';
 
-CREATE FUNCTION master_create_distributed_table(table_name regclass,
+CREATE FUNCTION create_distributed_table(table_name regclass,
                                                 distribution_column text,
                                                 distribution_method citus.distribution_type)
     RETURNS void
     LANGUAGE C STRICT
-    AS 'MODULE_PATHNAME', $$master_create_distributed_table$$;
-COMMENT ON FUNCTION master_create_distributed_table(table_name regclass,
+    AS 'MODULE_PATHNAME', $$create_distributed_table$$;
+COMMENT ON FUNCTION create_distributed_table(table_name regclass,
                                                     distribution_column text,
                                                     distribution_method citus.distribution_type)
     IS 'define the table distribution functions';
 
 -- define shard creation function for hash-partitioned tables
-CREATE FUNCTION master_create_worker_shards(table_name text, shard_count integer,
+CREATE FUNCTION create_worker_shards(table_name text, shard_count integer,
                                             replication_factor integer DEFAULT 2)
     RETURNS void
     AS 'MODULE_PATHNAME'
@@ -341,7 +341,7 @@ BEGIN
         END IF;
 
         -- ensure all shards are dropped
-        PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
+        PERFORM drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name);
 
         -- delete partition entry
         DELETE FROM pg_dist_partition WHERE logicalrelid = v_obj.objid;
@@ -352,18 +352,18 @@ $cdbdt$;
 COMMENT ON FUNCTION citus_drop_trigger()
     IS 'perform checks and actions at the end of DROP actions';
 
-CREATE FUNCTION master_dist_partition_cache_invalidate()
+CREATE FUNCTION dist_partition_cache_invalidate()
     RETURNS trigger
     LANGUAGE C
-    AS 'MODULE_PATHNAME', $$master_dist_partition_cache_invalidate$$;
-COMMENT ON FUNCTION master_dist_partition_cache_invalidate()
+    AS 'MODULE_PATHNAME', $$dist_partition_cache_invalidate$$;
+COMMENT ON FUNCTION dist_partition_cache_invalidate()
     IS 'register relcache invalidation for changed rows';
 
-CREATE FUNCTION master_dist_shard_cache_invalidate()
+CREATE FUNCTION dist_shard_cache_invalidate()
     RETURNS trigger
     LANGUAGE C
-    AS 'MODULE_PATHNAME', $$master_dist_shard_cache_invalidate$$;
-COMMENT ON FUNCTION master_dist_shard_cache_invalidate()
+    AS 'MODULE_PATHNAME', $$dist_shard_cache_invalidate$$;
+COMMENT ON FUNCTION dist_shard_cache_invalidate()
     IS 'register relcache invalidation for changed rows';
 
 
@@ -388,12 +388,12 @@ CREATE EVENT TRIGGER citus_cascade_to_partition
 
 CREATE TRIGGER dist_partition_cache_invalidate
     AFTER INSERT OR UPDATE OR DELETE
     ON pg_catalog.pg_dist_partition
-    FOR EACH ROW EXECUTE PROCEDURE master_dist_partition_cache_invalidate();
+    FOR EACH ROW EXECUTE PROCEDURE dist_partition_cache_invalidate();
 
 CREATE TRIGGER dist_shard_cache_invalidate
     AFTER INSERT OR UPDATE OR DELETE
     ON pg_catalog.pg_dist_shard
-    FOR EACH ROW EXECUTE PROCEDURE master_dist_shard_cache_invalidate();
+    FOR EACH ROW EXECUTE PROCEDURE dist_shard_cache_invalidate();
 
 
 /*****************************************************************************
@@ -485,7 +485,7 @@ COMMENT ON FUNCTION create_insert_proxy_for_table(regclass, regclass)
     IS 'create a proxy table that redirects INSERTed rows to a target table';
 
 -- define shard repair function
-CREATE FUNCTION master_copy_shard_placement(shard_id bigint,
+CREATE FUNCTION copy_shard_placement(shard_id bigint,
                                             source_node_name text,
                                             source_node_port integer,
                                             target_node_name text,
diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c
index 917940a76..b87c0d7f2 100644
--- a/src/backend/distributed/commands/create_distributed_table.c
+++ b/src/backend/distributed/commands/create_distributed_table.c
@@ -51,11 +51,11 @@ static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId,
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_create_distributed_table);
+PG_FUNCTION_INFO_V1(create_distributed_table);
 
 
 /*
- * master_create_distributed_table accepts a table, distribution column and
+ * create_distributed_table accepts a table, distribution column and
  * method and performs the corresponding catalog changes.
  *
  * XXX: We should perform more checks here to see if this table is fit for
@@ -66,7 +66,7 @@ PG_FUNCTION_INFO_V1(master_create_distributed_table);
  * preexisting content.
  */
 Datum
-master_create_distributed_table(PG_FUNCTION_ARGS)
+create_distributed_table(PG_FUNCTION_ARGS)
 {
 	Oid distributedRelationId = PG_GETARG_OID(0);
 	text *distributionColumnText = PG_GETARG_TEXT_P(1);
diff --git a/src/backend/distributed/master/master_create_shards.c b/src/backend/distributed/master/master_create_shards.c
index dcf374faf..341a23710 100644
--- a/src/backend/distributed/master/master_create_shards.c
+++ b/src/backend/distributed/master/master_create_shards.c
@@ -1,6 +1,6 @@
 /*-------------------------------------------------------------------------
  *
- * master_create_shards.c
+ * create_shards.c
  *
  * This file contains functions to distribute a table by creating shards for it
  * across a set of worker nodes.
@@ -54,11 +54,11 @@ static text * IntegerToText(int32 value);
 
 
 /* declarations for dynamic loading */
-PG_FUNCTION_INFO_V1(master_create_worker_shards);
+PG_FUNCTION_INFO_V1(create_worker_shards);
 
 
 /*
- * master_create_worker_shards creates empty shards for the given table based
+ * create_worker_shards creates empty shards for the given table based
  * on the specified number of initial shards. The function first gets a list of
  * candidate nodes and issues DDL commands on the nodes to create empty shard
  * placements on those nodes. The function then updates metadata on the master
@@ -67,7 +67,7 @@ PG_FUNCTION_INFO_V1(master_create_worker_shards);
  * ranges for each shard, giving them an equal split of the hash space.
  */
 Datum
-master_create_worker_shards(PG_FUNCTION_ARGS)
+create_worker_shards(PG_FUNCTION_ARGS)
 {
 	text *tableNameText = PG_GETARG_TEXT_P(0);
 	int32 shardCount = PG_GETARG_INT32(1);
@@ -163,7 +163,7 @@ master_create_worker_shards(PG_FUNCTION_ARGS)
 		text *maxHashTokenText = NULL;
 		int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement);
 		int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1);
-		Datum shardIdDatum = master_get_new_shardid(NULL);
+		Datum shardIdDatum = get_new_shardid(NULL);
 		int64 shardId = DatumGetInt64(shardIdDatum);
 
 		/* if we are at the last shard, make sure the max token value is INT_MAX */
diff --git a/src/backend/distributed/master/master_delete_protocol.c b/src/backend/distributed/master/master_delete_protocol.c
index 5b83802eb..06bb2c9c5 100644
--- a/src/backend/distributed/master/master_delete_protocol.c
+++ b/src/backend/distributed/master/master_delete_protocol.c
@@ -57,12 +57,12 @@ static bool ExecuteRemoteCommand(const char *nodeName, uint32 nodePort,
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_apply_delete_command);
-PG_FUNCTION_INFO_V1(master_drop_all_shards);
+PG_FUNCTION_INFO_V1(apply_delete_command);
+PG_FUNCTION_INFO_V1(drop_all_shards);
 
 
 /*
- * master_apply_delete_command takes in a delete command, finds shards that
+ * apply_delete_command takes in a delete command, finds shards that
  * match the criteria defined in the delete command, drops the found shards from
  * the worker nodes, and updates the corresponding metadata on the master node.
  * This function drops a shard if and only if all rows in the shard satisfy
@@ -75,7 +75,7 @@ PG_FUNCTION_INFO_V1(master_drop_all_shards);
  * even though related shard placements are not deleted.
  */
 Datum
-master_apply_delete_command(PG_FUNCTION_ARGS)
+apply_delete_command(PG_FUNCTION_ARGS)
 {
 	text *queryText = PG_GETARG_TEXT_P(0);
 	char *queryString = text_to_cstring(queryText);
@@ -98,7 +98,7 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 	bool failOK = false;
 	bool isTopLevel = true;
 
-	PreventTransactionChain(isTopLevel, "master_apply_delete_command");
+	PreventTransactionChain(isTopLevel, "apply_delete_command");
 
 	queryTreeNode = ParseTreeNode(queryString);
 	if (!IsA(queryTreeNode, DeleteStmt))
@@ -161,12 +161,12 @@ master_apply_delete_command(PG_FUNCTION_ARGS)
 
 
 /*
- * master_drop_shards attempts to drop all shards for a given relation.
- * Unlike master_apply_delete_command, this function can be called even
+ * drop_shards attempts to drop all shards for a given relation.
+ * Unlike apply_delete_command, this function can be called even
  * if the table has already been dropped.
  */
 Datum
-master_drop_all_shards(PG_FUNCTION_ARGS)
+drop_all_shards(PG_FUNCTION_ARGS)
 {
 	Oid relationId = PG_GETARG_OID(0);
 	text *schemaNameText = PG_GETARG_TEXT_P(1);
diff --git a/src/backend/distributed/master/master_node_protocol.c b/src/backend/distributed/master/master_node_protocol.c
index b36f2503e..d32c29b3d 100644
--- a/src/backend/distributed/master/master_node_protocol.c
+++ b/src/backend/distributed/master/master_node_protocol.c
@@ -58,22 +58,22 @@ static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescripto
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_get_table_metadata);
-PG_FUNCTION_INFO_V1(master_get_table_ddl_events);
-PG_FUNCTION_INFO_V1(master_get_new_shardid);
-PG_FUNCTION_INFO_V1(master_get_local_first_candidate_nodes);
-PG_FUNCTION_INFO_V1(master_get_round_robin_candidate_nodes);
-PG_FUNCTION_INFO_V1(master_get_active_worker_nodes);
+PG_FUNCTION_INFO_V1(get_table_metadata);
+PG_FUNCTION_INFO_V1(get_table_ddl_events);
+PG_FUNCTION_INFO_V1(get_new_shardid);
+PG_FUNCTION_INFO_V1(get_local_first_candidate_nodes);
+PG_FUNCTION_INFO_V1(get_round_robin_candidate_nodes);
+PG_FUNCTION_INFO_V1(get_active_worker_nodes);
 
 
 /*
- * master_get_table_metadata takes in a relation name, and returns partition
+ * get_table_metadata takes in a relation name, and returns partition
  * related metadata for the relation. These metadata are grouped and returned in
  * a tuple, and are used by the caller when creating new shards. The function
  * errors if given relation does not exist, or is not partitioned.
  */
 Datum
-master_get_table_metadata(PG_FUNCTION_ARGS)
+get_table_metadata(PG_FUNCTION_ARGS)
 {
 	text *relationName = PG_GETARG_TEXT_P(0);
 	Oid relationId = ResolveRelationId(relationName);
@@ -174,13 +174,13 @@ CStoreTable(Oid relationId)
 
 
 /*
- * master_get_table_ddl_events takes in a relation name, and returns the set of
+ * get_table_ddl_events takes in a relation name, and returns the set of
  * DDL commands needed to reconstruct the relation. The returned DDL commands
  * are similar in flavor to schema definitions that pgdump returns. The function
  * errors if given relation does not exist.
  */
 Datum
-master_get_table_ddl_events(PG_FUNCTION_ARGS)
+get_table_ddl_events(PG_FUNCTION_ARGS)
 {
 	FuncCallContext *functionContext = NULL;
 	ListCell *tableDDLEventCell = NULL;
@@ -240,7 +240,7 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_new_shardid allocates and returns a unique shardId for the shard
+ * get_new_shardid allocates and returns a unique shardId for the shard
  * to be created. This allocation occurs both in shared memory and in write
  * ahead logs; writing to logs avoids the risk of having shardId collisions.
  *
@@ -249,7 +249,7 @@ master_get_table_ddl_events(PG_FUNCTION_ARGS)
 * on an internal sequence created in initdb to generate unique identifiers.
  */
 Datum
-master_get_new_shardid(PG_FUNCTION_ARGS)
+get_new_shardid(PG_FUNCTION_ARGS)
 {
 	text *sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME);
 	Oid sequenceId = ResolveRelationId(sequenceName);
@@ -264,7 +264,7 @@ master_get_new_shardid(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_local_first_candidate_nodes returns a set of candidate host names
+ * get_local_first_candidate_nodes returns a set of candidate host names
  * and port numbers on which to place new shards. The function makes sure to
  * always allocate the first candidate node as the node the caller is connecting
  * from; and allocates additional nodes until the shard replication factor is
@@ -273,7 +273,7 @@ master_get_new_shardid(PG_FUNCTION_ARGS)
  * replication factor.
  */
 Datum
-master_get_local_first_candidate_nodes(PG_FUNCTION_ARGS)
+get_local_first_candidate_nodes(PG_FUNCTION_ARGS)
 {
 	FuncCallContext *functionContext = NULL;
 	uint32 desiredNodeCount = 0;
@@ -380,14 +380,14 @@ master_get_local_first_candidate_nodes(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_round_robin_candidate_nodes returns a set of candidate host names
+ * get_round_robin_candidate_nodes returns a set of candidate host names
  * and port numbers on which to place new shards. The function uses the round
  * robin policy to choose the nodes and tries to ensure that there is an even
  * distribution of shards across the worker nodes. This function errors out if
  * the number of available nodes falls short of the replication factor.
  */
 Datum
-master_get_round_robin_candidate_nodes(PG_FUNCTION_ARGS)
+get_round_robin_candidate_nodes(PG_FUNCTION_ARGS)
 {
 	uint64 shardId = PG_GETARG_INT64(0);
 	FuncCallContext *functionContext = NULL;
@@ -464,12 +464,12 @@ master_get_round_robin_candidate_nodes(PG_FUNCTION_ARGS)
 
 
 /*
- * master_get_active_worker_nodes returns a set of active worker host names and
+ * get_active_worker_nodes returns a set of active worker host names and
  * port numbers in deterministic order. Currently we assume that all worker
  * nodes in pg_worker_list.conf are active.
  */
 Datum
-master_get_active_worker_nodes(PG_FUNCTION_ARGS)
+get_active_worker_nodes(PG_FUNCTION_ARGS)
 {
 	FuncCallContext *functionContext = NULL;
 	uint32 workerNodeIndex = 0;
diff --git a/src/backend/distributed/master/master_repair_shards.c b/src/backend/distributed/master/master_repair_shards.c
index 005d02c83..f68e59ba0 100644
--- a/src/backend/distributed/master/master_repair_shards.c
+++ b/src/backend/distributed/master/master_repair_shards.c
@@ -45,11 +45,11 @@ static bool CopyDataFromFinalizedPlacement(Oid distributedTableId, int64 shardId
 
 
 /* declarations for dynamic loading */
-PG_FUNCTION_INFO_V1(master_copy_shard_placement);
+PG_FUNCTION_INFO_V1(copy_shard_placement);
 
 
 /*
- * master_copy_shard_placement implements a user-facing UDF to copy data from
+ * copy_shard_placement implements a user-facing UDF to copy data from
  * a healthy (source) node to an inactive (target) node. To accomplish this it
  * entirely recreates the table structure before copying all data. During this
  * time all modifications are paused to the shard. After successful repair, the
@@ -58,7 +58,7 @@ PG_FUNCTION_INFO_V1(master_copy_shard_placement);
 * in an unhealthy state.
  */
 Datum
-master_copy_shard_placement(PG_FUNCTION_ARGS)
+copy_shard_placement(PG_FUNCTION_ARGS)
 {
 	int64 shardId = PG_GETARG_INT64(0);
 	text *sourceNodeName = PG_GETARG_TEXT_P(1);
diff --git a/src/backend/distributed/master/master_stage_protocol.c b/src/backend/distributed/master/master_stage_protocol.c
index 3a79c4a73..88a4e1b62 100644
--- a/src/backend/distributed/master/master_stage_protocol.c
+++ b/src/backend/distributed/master/master_stage_protocol.c
@@ -52,19 +52,19 @@ static StringInfo WorkerPartitionValue(char *nodeName, uint32 nodePort, Oid rela
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_create_empty_shard);
-PG_FUNCTION_INFO_V1(master_append_table_to_shard);
+PG_FUNCTION_INFO_V1(create_empty_shard);
+PG_FUNCTION_INFO_V1(append_table_to_shard);
 
 
 /*
- * master_create_empty_shard creates an empty shard for the given distributed
+ * create_empty_shard creates an empty shard for the given distributed
  * table. For this, the function first gets a list of candidate nodes, connects
 * to these nodes, and issues DDL commands on the nodes to create empty shard
 * placements. The function then updates metadata on the master node to make
 * this shard (and its placements) visible.
  */
 Datum
-master_create_empty_shard(PG_FUNCTION_ARGS)
+create_empty_shard(PG_FUNCTION_ARGS)
 {
 	text *relationNameText = PG_GETARG_TEXT_P(0);
 	char *relationName = text_to_cstring(relationNameText);
@@ -99,7 +99,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 	}
 
 	/* generate new and unique shardId from sequence */
-	shardIdDatum = master_get_new_shardid(NULL);
+	shardIdDatum = get_new_shardid(NULL);
 	shardId = DatumGetInt64(shardIdDatum);
 
 	/* get table DDL commands to replay on the worker node */
@@ -137,7 +137,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 
 
 /*
- * master_append_table_to_shard appends the given table's contents to the given
+ * append_table_to_shard appends the given table's contents to the given
  * shard, and updates shard metadata on the master node. If the function fails
 * to append table data to all shard placements, it doesn't update any metadata
 * and errors out. Else if the function fails to append table data to some of
@@ -145,7 +145,7 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
 * placements will get cleaned up during shard rebalancing.
  */
 Datum
-master_append_table_to_shard(PG_FUNCTION_ARGS)
+append_table_to_shard(PG_FUNCTION_ARGS)
 {
 	uint64 shardId = PG_GETARG_INT64(0);
 	text *sourceTableNameText = PG_GETARG_TEXT_P(1);
@@ -208,7 +208,7 @@ master_append_table_to_shard(PG_FUNCTION_ARGS)
 	{
 		ereport(ERROR, (errmsg("could not find any shard placements for shardId "
 							   UINT64_FORMAT, shardId),
-						errhint("Try running master_create_empty_shard() first")));
+						errhint("Try running create_empty_shard() first")));
 	}
 
 	/* issue command to append table to each shard placement */
diff --git a/src/backend/distributed/planner/modify_planner.c b/src/backend/distributed/planner/modify_planner.c
index 4484e53bb..2cc37f0f9 100644
--- a/src/backend/distributed/planner/modify_planner.c
+++ b/src/backend/distributed/planner/modify_planner.c
@@ -524,7 +524,7 @@ DistributedModifyShardInterval(Query *query)
 						errmsg("could not find any shards for modification"),
 						errdetail("No shards exist for distributed table \"%s\".",
 								  relationName),
-						errhint("Run master_create_worker_shards to create shards "
+						errhint("Run create_worker_shards to create shards "
 								"and try again.")));
 	}
 
diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c
index 8f2eac018..b61338a4e 100644
--- a/src/backend/distributed/shared_library_init.c
+++ b/src/backend/distributed/shared_library_init.c
@@ -182,7 +182,7 @@ RegisterCitusConfigVariables(void)
 	NormalizeWorkerListPath();
 
 	DefineCustomBoolVariable(
-		"citus.binary_master_copy_format",
+		"citus.binary_copy_format",
 		gettext_noop("Use the binary master copy format."),
 		gettext_noop("When enabled, data is copied from workers to the master "
 					 "in PostgreSQL's binary serialization format."),
diff --git a/src/backend/distributed/test/distribution_metadata.c b/src/backend/distributed/test/distribution_metadata.c
index f91fe780c..c2827fca5 100644
--- a/src/backend/distributed/test/distribution_metadata.c
+++ b/src/backend/distributed/test/distribution_metadata.c
@@ -276,7 +276,7 @@ create_monolithic_shard_row(PG_FUNCTION_ARGS)
 	Oid distributedTableId = PG_GETARG_OID(0);
 	StringInfo minInfo = makeStringInfo();
 	StringInfo maxInfo = makeStringInfo();
-	Datum newShardIdDatum = master_get_new_shardid(NULL);
+	Datum newShardIdDatum = get_new_shardid(NULL);
 	int64 newShardId = DatumGetInt64(newShardIdDatum);
 	text *maxInfoText = NULL;
 	text *minInfoText = NULL;
diff --git a/src/backend/distributed/utils/metadata_cache.c b/src/backend/distributed/utils/metadata_cache.c
index 974455f39..64faa4ade 100644
--- a/src/backend/distributed/utils/metadata_cache.c
+++ b/src/backend/distributed/utils/metadata_cache.c
@@ -59,8 +59,8 @@ static void CachedRelationLookup(const char *relationName, Oid *cachedOid);
 
 
 /* exports for SQL callable functions */
-PG_FUNCTION_INFO_V1(master_dist_partition_cache_invalidate);
-PG_FUNCTION_INFO_V1(master_dist_shard_cache_invalidate);
+PG_FUNCTION_INFO_V1(dist_partition_cache_invalidate);
+PG_FUNCTION_INFO_V1(dist_shard_cache_invalidate);
 
 
 /*
@@ -439,12 +439,12 @@ CitusExtraDataContainerFuncId(void)
 
 
 /*
- * master_dist_partition_cache_invalidate is a trigger function that performs
+ * dist_partition_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_partition are changed
 * on the SQL level.
  */
 Datum
-master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
+dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
 {
 	TriggerData *triggerData = (TriggerData *) fcinfo->context;
 	HeapTuple newTuple = NULL;
@@ -497,12 +497,12 @@ master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS)
 
 
 /*
- * master_dist_shard_cache_invalidate is a trigger function that performs
+ * dist_shard_cache_invalidate is a trigger function that performs
 * relcache invalidations when the contents of pg_dist_shard are changed
 * on the SQL level.
  */
 Datum
-master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
+dist_shard_cache_invalidate(PG_FUNCTION_ARGS)
 {
 	TriggerData *triggerData = (TriggerData *) fcinfo->context;
 	HeapTuple newTuple = NULL;
diff --git a/src/bin/csql/stage.h b/src/bin/csql/stage.h
index 097102ed3..6bd13337c 100644
--- a/src/bin/csql/stage.h
+++ b/src/bin/csql/stage.h
@@ -30,13 +30,13 @@
 #define ROLLBACK_COMMAND "ROLLBACK"
 
 /* Names of remote function calls to execute on the master. */
-#define MASTER_GET_TABLE_METADATA "SELECT * FROM master_get_table_metadata($1::text)"
-#define MASTER_GET_TABLE_DDL_EVENTS "SELECT * FROM master_get_table_ddl_events($1::text)"
-#define MASTER_GET_NEW_SHARDID "SELECT * FROM master_get_new_shardid()"
+#define MASTER_GET_TABLE_METADATA "SELECT * FROM get_table_metadata($1::text)"
+#define MASTER_GET_TABLE_DDL_EVENTS "SELECT * FROM get_table_ddl_events($1::text)"
+#define MASTER_GET_NEW_SHARDID "SELECT * FROM get_new_shardid()"
 #define MASTER_GET_LOCAL_FIRST_CANDIDATE_NODES \
-	"SELECT * FROM master_get_local_first_candidate_nodes()"
+	"SELECT * FROM get_local_first_candidate_nodes()"
 #define MASTER_GET_ROUND_ROBIN_CANDIDATE_NODES \
-	"SELECT * FROM master_get_round_robin_candidate_nodes($1::int8)"
+	"SELECT * FROM get_round_robin_candidate_nodes($1::int8)"
 
 #define MASTER_INSERT_SHARD_ROW \
 	"INSERT INTO pg_dist_shard " \
diff --git a/src/include/distributed/master_protocol.h b/src/include/distributed/master_protocol.h
index c3d543bca..6e0cf55cd 100644
--- a/src/include/distributed/master_protocol.h
+++ b/src/include/distributed/master_protocol.h
@@ -87,23 +87,23 @@ extern void CreateShardPlacements(int64 shardId, List *ddlEventList,
 								  int replicationFactor);
 
 /* Function declarations for generating metadata for shard creation */
-extern Datum master_get_table_metadata(PG_FUNCTION_ARGS);
-extern Datum master_get_table_ddl_events(PG_FUNCTION_ARGS);
-extern Datum master_get_new_shardid(PG_FUNCTION_ARGS);
-extern Datum master_get_local_first_candidate_nodes(PG_FUNCTION_ARGS);
-extern Datum master_get_round_robin_candidate_nodes(PG_FUNCTION_ARGS);
-extern Datum master_get_active_worker_nodes(PG_FUNCTION_ARGS);
+extern Datum get_table_metadata(PG_FUNCTION_ARGS);
+extern Datum get_table_ddl_events(PG_FUNCTION_ARGS);
+extern Datum get_new_shardid(PG_FUNCTION_ARGS);
+extern Datum get_local_first_candidate_nodes(PG_FUNCTION_ARGS);
+extern Datum get_round_robin_candidate_nodes(PG_FUNCTION_ARGS);
+extern Datum get_active_worker_nodes(PG_FUNCTION_ARGS);
 
 /* Function declarations to help with data staging and deletion */
-extern Datum master_create_empty_shard(PG_FUNCTION_ARGS);
-extern Datum master_append_table_to_shard(PG_FUNCTION_ARGS);
-extern Datum master_apply_delete_command(PG_FUNCTION_ARGS);
-extern Datum master_drop_all_shards(PG_FUNCTION_ARGS);
+extern Datum create_empty_shard(PG_FUNCTION_ARGS);
+extern Datum append_table_to_shard(PG_FUNCTION_ARGS);
+extern Datum apply_delete_command(PG_FUNCTION_ARGS);
+extern Datum drop_all_shards(PG_FUNCTION_ARGS);
 
 /* function declarations for shard creation functionality */
-extern Datum master_create_worker_shards(PG_FUNCTION_ARGS);
+extern Datum create_worker_shards(PG_FUNCTION_ARGS);
 
 /* function declarations for shard repair functionality */
-extern Datum master_copy_shard_placement(PG_FUNCTION_ARGS);
+extern Datum copy_shard_placement(PG_FUNCTION_ARGS);
 
 #endif   /* MASTER_PROTOCOL_H */
diff --git a/src/include/distributed/worker_protocol.h b/src/include/distributed/worker_protocol.h
index db78f8138..b003b60ce 100644
--- a/src/include/distributed/worker_protocol.h
+++ b/src/include/distributed/worker_protocol.h
@@ -48,7 +48,7 @@
 
 /* Defines that relate to fetching foreign tables */
 #define FOREIGN_CACHED_FILE_PATH "pg_foreign_file/cached/%s"
-#define GET_TABLE_DDL_EVENTS "SELECT master_get_table_ddl_events('%s')"
+#define GET_TABLE_DDL_EVENTS "SELECT get_table_ddl_events('%s')"
 #define SET_FOREIGN_TABLE_FILENAME "ALTER FOREIGN TABLE %s OPTIONS (SET filename '%s')"
 #define FOREIGN_FILE_PATH_COMMAND "SELECT worker_foreign_file_path('%s')"
 #define SET_SEARCH_PATH_COMMAND "SET search_path TO %s"
diff --git a/src/test/regress/expected/multi_binary_master_copy_format.out b/src/test/regress/expected/multi_binary_master_copy_format.out
index 505d18a34..2576b9d18 100644
--- a/src/test/regress/expected/multi_binary_master_copy_format.out
+++ b/src/test/regress/expected/multi_binary_master_copy_format.out
@@ -2,7 +2,7 @@
 -- MULTI_BINARY_MASTER_COPY
 --
 -- Try binary master copy for different executors
-SET citus.binary_master_copy_format TO 'on';
+SET citus.binary_copy_format TO 'on';
 SET citus.task_executor_type TO 'task-tracker';
 SELECT count(*) FROM lineitem;
  count
diff --git a/src/test/regress/expected/multi_create_insert_proxy.out b/src/test/regress/expected/multi_create_insert_proxy.out
index d90262c4a..c6ae59df6 100644
--- a/src/test/regress/expected/multi_create_insert_proxy.out
+++ b/src/test/regress/expected/multi_create_insert_proxy.out
@@ -46,15 +46,15 @@ CREATE TABLE insert_target (
 );
 -- squelch WARNINGs that contain worker_port
 SET client_min_messages TO ERROR;
-SELECT master_create_distributed_table('insert_target', 'id', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('insert_target', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('insert_target', 2, 1);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('insert_target', 2, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_create_shards.out b/src/test/regress/expected/multi_create_shards.out
index 39cb45eef..abedc93cf 100644
--- a/src/test/regress/expected/multi_create_shards.out
+++ b/src/test/regress/expected/multi_create_shards.out
@@ -39,29 +39,29 @@ CREATE TABLE table_to_distribute (
 	test_type_data dummy_type
 );
 -- use an index instead of table name
-SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
+SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash');
 ERROR:  cannot distribute relation: table_to_distribute_pkey
 DETAIL:  Distributed relations must be regular or foreign tables.
 -- use a bad column name
-SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash');
 ERROR:  column "bad_column" of relation "table_to_distribute" does not exist
 -- use unrecognized partition type
-SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized');
+SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized');
 ERROR:  invalid input value for enum citus.distribution_type: "unrecognized"
 LINE 1: ..._distributed_table('table_to_distribute', 'name', 'unrecogni...
                                                              ^
 -- use a partition column of a type lacking any default operator class
-SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash');
 ERROR:  data type json has no default operator class for specified partition method
 DETAIL:  Partition column types must have a default operator class defined.
 -- use a partition column of type lacking the required support function (hash)
-SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
+SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
 ERROR:  could not identify a hash function for type dummy_type
 DETAIL:  Partition column types must have a hash function defined to use hash partitioning.
 -- distribute table and inspect side effects
-SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('table_to_distribute', 'name', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -73,19 +73,19 @@ SELECT partmethod, partkey FROM pg_dist_partition
 (1 row)
 
 -- use a bad shard count
-SELECT master_create_worker_shards('table_to_distribute', 0, 1);
+SELECT create_worker_shards('table_to_distribute', 0, 1);
 ERROR:  shard_count must be positive
 -- use a bad replication factor
-SELECT master_create_worker_shards('table_to_distribute', 16, 0);
+SELECT create_worker_shards('table_to_distribute', 16, 0);
 ERROR:  replication_factor must be positive
 -- use a replication factor higher than shard count
-SELECT master_create_worker_shards('table_to_distribute', 16, 3);
+SELECT create_worker_shards('table_to_distribute', 16, 3);
 ERROR:  replication_factor (3) exceeds number of worker nodes (2)
 HINT:  Add more worker nodes or try again with a lower replication factor.
 -- finally, create shards and inspect metadata
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('table_to_distribute', 16, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -130,7 +130,7 @@ SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relk
 (1 row)
 
 -- try to create them again
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
+SELECT create_worker_shards('table_to_distribute', 16, 1);
 ERROR:  table "table_to_distribute" has already had shards created for it
 -- test list sorting
 SELECT sort_names('sumedh', 'jason', 'ozgun');
@@ -155,16 +155,16 @@ CREATE FOREIGN TABLE foreign_table_to_distribute
 	id bigint
 )
 SERVER fake_fdw_server;
-SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1);
+SELECT create_worker_shards('foreign_table_to_distribute', 16, 1);
 NOTICE:  foreign-data wrapper "fake_fdw" does not have an extension defined
- master_create_worker_shards
-----------------------------
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -197,15 +197,15 @@ CREATE TABLE weird_shard_count
 	name text,
 	id bigint
 );
-SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('weird_shard_count', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('weird_shard_count', 7, 1);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('weird_shard_count', 7, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out
index dd3f1c167..43c79fc29 100644
--- a/src/test/regress/expected/multi_create_table.out
+++ b/src/test/regress/expected/multi_create_table.out
@@ -21,12 +21,12 @@ CREATE TABLE lineitem (
 	l_shipmode char(10) not null,
 	l_comment varchar(44) not null,
 	PRIMARY KEY(l_orderkey, l_linenumber) );
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
 WARNING:  table "lineitem" has a unique constraint
 DETAIL:  Unique constraints and primary keys on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- master_create_distributed_table
---------------------------------
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -42,12 +42,12 @@ CREATE TABLE orders (
 	o_shippriority integer not null,
 	o_comment varchar(79) not null,
 	PRIMARY KEY(o_orderkey) );
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');
 WARNING:  table "orders" has a unique constraint
 DETAIL:  Unique constraints and primary keys on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- master_create_distributed_table
---------------------------------
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -60,9 +60,9 @@ CREATE TABLE customer (
 	c_acctbal decimal(15,2) not null,
 	c_mktsegment char(10) not null,
 	c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('customer', 'c_custkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -71,9 +71,9 @@ CREATE TABLE nation (
 	n_name char(25) not null,
 	n_regionkey integer not null,
 	n_comment varchar(152));
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -87,9 +87,9 @@ CREATE TABLE part (
 	p_container char(10) not null,
 	p_retailprice decimal(15,2) not null,
 	p_comment varchar(23) not null);
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('part', 'p_partkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -103,9 +103,9 @@ CREATE TABLE supplier
 	s_acctbal decimal(15,2) not null,
 	s_comment varchar(101) not null
 );
-SELECT master_create_distributed_table('supplier', 's_suppkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('supplier', 's_suppkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -116,7 +116,7 @@ CREATE TABLE primary_key_on_non_part_col
 	partition_col integer,
 	other_col integer PRIMARY KEY
 );
-SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
 ERROR:  cannot distribute relation: "primary_key_on_non_part_col"
 DETAIL:  Distributed relations cannot have UNIQUE constraints or PRIMARY KEYs that do not include the partition column.
 CREATE TABLE unique_const_on_non_part_col
@@ -124,7 +124,7 @@ CREATE TABLE unique_const_on_non_part_col
 	partition_col integer,
 	other_col integer UNIQUE
 );
-SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
 ERROR:  cannot distribute relation: "primary_key_on_non_part_col"
 DETAIL:  Distributed relations cannot have UNIQUE constraints or PRIMARY KEYs that do not include the partition column.
 -- now show that Citus can distribute unique constrints that include
@@ -134,9 +134,9 @@ CREATE TABLE primary_key_on_part_col
 	partition_col integer PRIMARY KEY,
 	other_col integer
 );
-SELECT master_create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -145,9 +145,9 @@ CREATE TABLE unique_const_on_part_col
 	partition_col integer UNIQUE,
 	other_col integer
 );
-SELECT master_create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -157,9 +157,9 @@ CREATE TABLE unique_const_on_two_columns
 	other_col integer,
 	UNIQUE (partition_col, other_col)
 );
-SELECT master_create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -168,12 +168,12 @@ CREATE TABLE unique_const_append_partitioned_tables
 	partition_col integer UNIQUE,
 	other_col integer
 );
-SELECT master_create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');
+SELECT create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');
 WARNING:  table "unique_const_append_partitioned_tables" has a unique constraint
 DETAIL:  Unique constraints and primary keys on append-partitioned tables cannot be enforced.
 HINT:  Consider using hash partitioning.
- master_create_distributed_table
---------------------------------
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -182,9 +182,9 @@ CREATE TABLE unique_const_range_partitioned_tables
 	partition_col integer UNIQUE,
 	other_col integer
 );
-SELECT master_create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_data_types.out b/src/test/regress/expected/multi_data_types.out
index 18b7a54dc..519da40f8 100644
--- a/src/test/regress/expected/multi_data_types.out
+++ b/src/test/regress/expected/multi_data_types.out
@@ -44,15 +44,15 @@ CREATE TABLE composite_type_partitioned_table
 	id integer,
 	col test_composite_type
 );
-SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('composite_type_partitioned_table', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -81,15 +81,15 @@ CREATE TABLE bugs (
 	id integer,
 	status bug_status
 );
-SELECT master_create_distributed_table('bugs', 'status', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('bugs', 'status', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('bugs', 4, 1);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('bugs', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -121,15 +121,15 @@ CREATE TABLE varchar_hash_partitioned_table
 	id int,
 	name varchar
 );
-SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('varchar_hash_partitioned_table', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_fdw_create_table.out b/src/test/regress/expected/multi_fdw_create_table.out
index 6557e318a..b0584d7af 100644
--- a/src/test/regress/expected/multi_fdw_create_table.out
+++ b/src/test/regress/expected/multi_fdw_create_table.out
@@ -29,9 +29,9 @@ CREATE FOREIGN TABLE lineitem (
 	l_comment varchar(44) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -47,9 +47,9 @@ CREATE FOREIGN TABLE orders (
 	o_comment varchar(79) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -64,9 +64,9 @@ CREATE FOREIGN TABLE customer (
 	c_comment varchar(117) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('customer', 'c_custkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -77,9 +77,9 @@ CREATE FOREIGN TABLE nation (
 	n_comment varchar(152))
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -95,9 +95,9 @@ CREATE FOREIGN TABLE part (
 	p_comment varchar(23) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('part', 'p_partkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_fdw_master_protocol.out b/src/test/regress/expected/multi_fdw_master_protocol.out
index bffb6d356..fdda4a877 100644
--- a/src/test/regress/expected/multi_fdw_master_protocol.out
+++ b/src/test/regress/expected/multi_fdw_master_protocol.out
@@ -3,27 +3,27 @@
 --
 -- Tests that check the metadata returned by the master node.
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-	   part_placement_policy FROM master_get_table_metadata('lineitem');
+	   part_placement_policy FROM get_table_metadata('lineitem');
  part_storage_type |  part_key  | part_replica_count | part_max_size | part_placement_policy
 -------------------+------------+--------------------+---------------+-----------------------
  f                 | l_orderkey |                  2 |        307200 |                     2
 (1 row)
 
-SELECT * FROM master_get_table_ddl_events('lineitem');
-                            master_get_table_ddl_events
+SELECT * FROM get_table_ddl_events('lineitem');
+                            get_table_ddl_events
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  CREATE EXTENSION IF NOT EXISTS file_fdw WITH SCHEMA public
 CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw
 CREATE FOREIGN TABLE lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL) SERVER file_server OPTIONS (format 'text', filename '', delimiter '|', "null" '')
 (3 rows)
 
-SELECT * FROM master_get_new_shardid();
- master_get_new_shardid
-------------------------
-                 102008
+SELECT * FROM get_new_shardid();
+ get_new_shardid
+-----------------
+          102008
 (1 row)
 
-SELECT node_name FROM master_get_local_first_candidate_nodes();
+SELECT node_name FROM get_local_first_candidate_nodes();
  node_name
 -----------
  localhost
diff --git a/src/test/regress/expected/multi_hash_pruning.out b/src/test/regress/expected/multi_hash_pruning.out
index 782360c04..ce53a0e7d 100644
--- a/src/test/regress/expected/multi_hash_pruning.out
+++ b/src/test/regress/expected/multi_hash_pruning.out
@@ -16,9 +16,9 @@ CREATE TABLE orders_hash_partitioned (
 	o_clerk char(15),
 	o_shippriority integer,
 	o_comment varchar(79) );
-SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_index_statements.out b/src/test/regress/expected/multi_index_statements.out
index 73e834e50..8cf06bf28 100644
--- a/src/test/regress/expected/multi_index_statements.out
+++ b/src/test/regress/expected/multi_index_statements.out
@@ -8,54 +8,54 @@
 --
 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102080;
 CREATE TABLE index_test_range(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_range', 'a', 'range');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('index_test_range', 'a', 'range');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_empty_shard('index_test_range');
- master_create_empty_shard
---------------------------
-                    102080
+SELECT create_empty_shard('index_test_range');
+ create_empty_shard
+--------------------
+             102080
(1 row)
 
-SELECT master_create_empty_shard('index_test_range');
- master_create_empty_shard
---------------------------
-                    102081
+SELECT create_empty_shard('index_test_range');
+ create_empty_shard
+--------------------
+             102081
 (1 row)
 
 CREATE TABLE index_test_hash(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('index_test_hash', 'a', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('index_test_hash', 8, 2);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('index_test_hash', 8, 2);
+ create_worker_shards
+----------------------
 
 (1 row)
 
 CREATE TABLE index_test_append(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_append', 'a', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('index_test_append', 'a', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_empty_shard('index_test_append');
- master_create_empty_shard
---------------------------
-                    102090
+SELECT create_empty_shard('index_test_append');
+ create_empty_shard
+--------------------
+             102090
 (1 row)
 
-SELECT master_create_empty_shard('index_test_append');
- master_create_empty_shard
---------------------------
-                    102091
+SELECT create_empty_shard('index_test_append');
+ create_empty_shard
+--------------------
+             102091
 (1 row)
 
 --
@@ -114,7 +114,7 @@ SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
      0
 (1 row)
 
-\c - - - :master_port
+\c - - - :port
 -- Verify that we error out on unsupported statement types
 CREATE INDEX CONCURRENTLY try_index ON lineitem (l_orderkey);
 ERROR:  creating indexes concurrently on distributed tables is currently unsupported
@@ -211,7 +211,7 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
 ------------+-----------+-----------+------------+----------
 (0 rows)
 
-\c - - - :master_port
+\c - - - :port
 -- Drop created tables
 DROP TABLE index_test_range;
 DROP TABLE index_test_hash;
diff --git a/src/test/regress/expected/multi_master_protocol.out b/src/test/regress/expected/multi_master_protocol.out
index 3d6e627a5..383ec98ef 100644
--- a/src/test/regress/expected/multi_master_protocol.out
+++ b/src/test/regress/expected/multi_master_protocol.out
@@ -3,48 +3,48 @@
 --
 -- Tests that check the metadata returned by the master node.
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-	   part_placement_policy FROM master_get_table_metadata('lineitem');
+	   part_placement_policy FROM get_table_metadata('lineitem');
  part_storage_type |  part_key  | part_replica_count | part_max_size | part_placement_policy
 -------------------+------------+--------------------+---------------+-----------------------
 t                 | l_orderkey |                  2 |        307200 |                     2
 (1 row)
 
-SELECT * FROM master_get_table_ddl_events('lineitem');
-                            master_get_table_ddl_events
+SELECT * FROM get_table_ddl_events('lineitem');
+                            get_table_ddl_events
 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 CREATE TABLE lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL)
 CREATE INDEX lineitem_time_index ON lineitem USING btree (l_shipdate)
 ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber)
 (3 rows)
 
-SELECT * FROM master_get_new_shardid();
- master_get_new_shardid
-------------------------
-                 102008
+SELECT * FROM get_new_shardid();
+ get_new_shardid
+-----------------
+          102008
 (1 row)
 
-SELECT * FROM master_get_local_first_candidate_nodes();
+SELECT * FROM get_local_first_candidate_nodes();
  node_name | node_port
 -----------+-----------
 localhost |     57638
 localhost |     57637
 (2 rows)
 
-SELECT * FROM master_get_round_robin_candidate_nodes(1);
+SELECT * FROM get_round_robin_candidate_nodes(1);
  node_name | node_port
 -----------+-----------
 localhost |     57638
 localhost |     57637
 (2 rows)
 
-SELECT * FROM master_get_round_robin_candidate_nodes(2);
+SELECT * FROM get_round_robin_candidate_nodes(2);
  node_name | node_port
 -----------+-----------
 localhost |     57637
 localhost |     57638
 (2 rows)
 
-SELECT * FROM master_get_active_worker_nodes();
+SELECT * FROM get_active_worker_nodes();
  node_name | node_port
 -----------+-----------
 localhost |     57638
diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out
index d28f173cc..1a2ee0389 100644
--- a/src/test/regress/expected/multi_modifications.out
+++ b/src/test/regress/expected/multi_modifications.out
@@ -13,60 +13,60 @@ CREATE TABLE limit_orders (
 CREATE TABLE insufficient_shards ( LIKE limit_orders );
 CREATE TABLE range_partitioned ( LIKE limit_orders );
 CREATE TABLE append_partitioned ( LIKE limit_orders );
-SELECT master_create_distributed_table('limit_orders', 'id', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('limit_orders', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('insufficient_shards', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_distributed_table('range_partitioned', 'id', 'range');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('range_partitioned', 'id', 'range');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_distributed_table('append_partitioned', 'id', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('append_partitioned', 'id', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('limit_orders', 2, 2);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('limit_orders', 2, 2);
+ create_worker_shards
+----------------------
 
 (1 row)
 
 -- make a single shard that covers no partition values
-SELECT master_create_worker_shards('insufficient_shards', 1, 1);
- master_create_worker_shards
-----------------------------
+SELECT create_worker_shards('insufficient_shards', 1, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0
 WHERE logicalrelid = 'insufficient_shards'::regclass;
 -- create range-partitioned shards
-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 49999
 WHERE shardid = :new_shard_id;
-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 50000, shardmaxvalue = 99999
 WHERE shardid = :new_shard_id;
 -- create append-partitioned shards
-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000
 WHERE shardid = :new_shard_id;
-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000
 WHERE shardid = :new_shard_id;
diff --git a/src/test/regress/expected/multi_partition_pruning.out b/src/test/regress/expected/multi_partition_pruning.out
index cc312078b..c0b17117e 100644
--- a/src/test/regress/expected/multi_partition_pruning.out
+++ b/src/test/regress/expected/multi_partition_pruning.out
@@ -65,9 +65,9 @@ CREATE TABLE varchar_partitioned_table
 (
 	varchar_column varchar(100)
 );
-SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -93,9 +93,9 @@ CREATE TABLE array_partitioned_table
 (
 	array_column text[]
 );
-SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append');
- master_create_distributed_table
---------------------------------
+SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append');
+ create_distributed_table
create_distributed_table +-------------------------- (1 row) @@ -129,9 +129,9 @@ CREATE TABLE composite_partitioned_table ( composite_column composite_type ); -SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_repair_shards.out b/src/test/regress/expected/multi_repair_shards.out index bf76df9c5..046b02301 100644 --- a/src/test/regress/expected/multi_repair_shards.out +++ b/src/test/regress/expected/multi_repair_shards.out @@ -8,16 +8,16 @@ CREATE INDEX ON customer_engagements (id); CREATE INDEX ON customer_engagements (created_at); CREATE INDEX ON customer_engagements (event_data); -- distribute the table -SELECT master_create_distributed_table('customer_engagements', 'id', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('customer_engagements', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) -- create a single shard on the first worker -SELECT master_create_worker_shards('customer_engagements', 1, 2); - master_create_worker_shards ------------------------------ +SELECT create_worker_shards('customer_engagements', 1, 2); + create_worker_shards +---------------------- (1 row) @@ -29,7 +29,7 @@ INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event'); -- (i) create a new shard -- (ii) mark the second shard placements as unhealthy -- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones --- (iv) do a successful master_copy_shard_placement from the first placement to the second +-- (iv) do a successful copy_shard_placement from the first placement to the second -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement -- get the newshardid SELECT shardid as newshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_engagements'::regclass @@ -39,15 +39,15 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :newshardid AN -- add a fake healthy placement for the tests INSERT INTO pg_dist_shard_placement (nodename, nodeport, shardid, shardstate, shardlength) VALUES ('dummyhost', :worker_2_port, :newshardid, 1, 0); -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port); +SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port); ERROR: target placement must be in inactive state -- also try to copy from an inactive placement -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port); +SELECT copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port); ERROR: source placement must be in finalized state -- "copy" this shard from the first placement to the second one -SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); - master_copy_shard_placement ------------------------------ +SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); + copy_shard_placement +---------------------- (1 row) @@ -69,17 +69,17 @@ CREATE FOREIGN TABLE remote_engagements ( event_data text ) SERVER fake_fdw_server; -- distribute the table 
-SELECT master_create_distributed_table('remote_engagements', 'id', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('remote_engagements', 'id', 'hash'); + create_distributed_table +-------------------------- (1 row) -- create a single shard on the first worker -SELECT master_create_worker_shards('remote_engagements', 1, 2); +SELECT create_worker_shards('remote_engagements', 1, 2); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined - master_create_worker_shards ------------------------------ + create_worker_shards +---------------------- (1 row) @@ -89,6 +89,6 @@ SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remo -- now, update the second placement as unhealthy UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND nodeport = :worker_2_port; -- oops! we don't support repairing shards backed by foreign tables -SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); +SELECT copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: cannot repair shard DETAIL: Repairing shards backed by foreign tables is not supported. diff --git a/src/test/regress/expected/multi_repartitioned_subquery_udf.out b/src/test/regress/expected/multi_repartitioned_subquery_udf.out index 288ee5427..a34fd8e2e 100644 --- a/src/test/regress/expected/multi_repartitioned_subquery_udf.out +++ b/src/test/regress/expected/multi_repartitioned_subquery_udf.out @@ -2,7 +2,7 @@ -- MULTI_REPARTITIONED_SUBQUERY_UDF -- -- Create UDF in master and workers -\c - - - :master_port +\c - - - :port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping CREATE FUNCTION median(double precision[]) RETURNS double precision @@ -33,7 +33,7 @@ LANGUAGE sql IMMUTABLE AS $_$ OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -- Run query on master -\c - - - :master_port +\c - - - :port SET citus.task_executor_type TO 'task-tracker'; SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a diff --git a/src/test/regress/expected/multi_simple_queries.out b/src/test/regress/expected/multi_simple_queries.out index 764e3c705..3c09749e4 100644 --- a/src/test/regress/expected/multi_simple_queries.out +++ b/src/test/regress/expected/multi_simple_queries.out @@ -11,15 +11,15 @@ CREATE TABLE articles ( CREATE TABLE authors ( name text, id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard (LIKE articles); -SELECT master_create_distributed_table('articles', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('articles', 'author_id', 'hash'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('articles_single_shard', 'author_id', 'hash'); + create_distributed_table +-------------------------- (1 row) @@ -30,15 +30,15 @@ SELECT count(*) from articles; (1 row) -SELECT master_create_worker_shards('articles', 2, 1); - master_create_worker_shards ------------------------------ +SELECT create_worker_shards('articles', 2, 1); + create_worker_shards +---------------------- (1 row) 
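-- [Editor's note: an illustrative aside, not part of the patch. As before the
-- rename, the two integer arguments of create_worker_shards are the shard
-- count and the replication factor, so a minimal sketch of the flow this test
-- exercises would be:
--   SELECT create_distributed_table('articles', 'author_id', 'hash');
--   SELECT create_worker_shards('articles', 2, 1); -- 2 shards, 1 replica each
-- with articles_single_shard getting a single shard, as shown next.]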
-SELECT master_create_worker_shards('articles_single_shard', 1, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('articles_single_shard', 1, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_table_ddl.out b/src/test/regress/expected/multi_table_ddl.out
index e384ae32b..fb021649c 100644
--- a/src/test/regress/expected/multi_table_ddl.out
+++ b/src/test/regress/expected/multi_table_ddl.out
@@ -3,9 +3,9 @@
 --
 -- Tests around changing the schema and dropping of a distributed table
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
@@ -26,20 +26,20 @@ ERROR: cannot execute ALTER TABLE command involving partition column
 BEGIN;
 DROP TABLE testtableddl;
 ERROR: DROP distributed table cannot run inside a transaction block
-CONTEXT: SQL statement "SELECT master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
+CONTEXT: SQL statement "SELECT drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)"
 PL/pgSQL function citus_drop_trigger() line 15 at PERFORM
 ROLLBACK;
 -- verify that the table can be dropped
 DROP TABLE testtableddl;
 -- verify that the table can be dropped even if shards exist
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT 1 FROM master_create_empty_shard('testtableddl');
+SELECT 1 FROM create_empty_shard('testtableddl');
 ?column?
 ----------
        1
diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out
index 381d57b50..eb374edff 100644
--- a/src/test/regress/expected/multi_task_assignment_policy.out
+++ b/src/test/regress/expected/multi_task_assignment_policy.out
@@ -6,9 +6,9 @@
 -- and shard placement data into system catalogs. We next run Explain command,
 -- and check that tasks are assigned to worker nodes as expected.
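-- [Editor's note: an illustrative aside, not part of the patch. Task assignment
-- is controlled per session through a GUC; assuming the citus.task_assignment_policy
-- setting this test suite uses elsewhere, a hypothetical check could read:
--   SET citus.task_assignment_policy TO 'round-robin';
--   EXPLAIN SELECT count(*) FROM task_assignment_test_table;
-- with the EXPLAIN output showing which worker each task lands on.]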
CREATE TABLE task_assignment_test_table (test_id integer); -SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/expected/multi_upsert.out b/src/test/regress/expected/multi_upsert.out index cc16748d0..9468a4f72 100644 --- a/src/test/regress/expected/multi_upsert.out +++ b/src/test/regress/expected/multi_upsert.out @@ -8,15 +8,15 @@ CREATE TABLE upsert_test third_col int ); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('upsert_test', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_worker_shards('upsert_test', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_worker_shards('upsert_test', '4', '2'); + create_worker_shards +---------------------- (1 row) @@ -112,15 +112,15 @@ CREATE TABLE upsert_test_2 PRIMARY KEY (part_key, other_col) ); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_worker_shards('upsert_test_2', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_worker_shards('upsert_test_2', '4', '2'); + create_worker_shards +---------------------- (1 row) @@ -139,15 +139,15 @@ CREATE TABLE upsert_test_3 -- note that this is not a unique index CREATE INDEX idx_ups_test ON upsert_test_3(part_key); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_worker_shards('upsert_test_3', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_worker_shards('upsert_test_3', '4', '2'); + create_worker_shards +---------------------- (1 row) @@ -161,15 +161,15 @@ CREATE TABLE upsert_test_4 count int ); -- distribute the table and create shards -SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash'); + create_distributed_table +-------------------------- (1 row) -SELECT master_create_worker_shards('upsert_test_4', '4', '2'); - master_create_worker_shards ------------------------------ +SELECT create_worker_shards('upsert_test_4', '4', '2'); + create_worker_shards +---------------------- (1 row) @@ -191,15 +191,15 @@ SELECT * FROM upsert_test_4; -- now test dropped columns CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); -SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT 
create_distributed_table('dropcol_distributed', 'key', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('dropcol_distributed', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_upsert_0.out b/src/test/regress/expected/multi_upsert_0.out
index c8c1e705e..552286dd7 100644
--- a/src/test/regress/expected/multi_upsert_0.out
+++ b/src/test/regress/expected/multi_upsert_0.out
@@ -8,14 +8,14 @@ CREATE TABLE upsert_test
 	third_col int
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test', 'part_key', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('upsert_test', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test', '4', '2');
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -145,14 +145,14 @@ CREATE TABLE upsert_test_2
 	PRIMARY KEY (part_key, other_col)
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('upsert_test_2', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test_2', '4', '2');
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -177,14 +177,14 @@ CREATE TABLE upsert_test_3
 -- note that this is not a unique index
 CREATE INDEX idx_ups_test ON upsert_test_3(part_key);
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('upsert_test_3', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test_3', '4', '2');
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -201,14 +201,14 @@ CREATE TABLE upsert_test_4
 	count int
 );
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('upsert_test_4', '4', '2');
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('upsert_test_4', '4', '2');
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -249,14 +249,14 @@ SELECT * FROM upsert_test_4;
 -- now test dropped columns
 CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
-SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('dropcol_distributed', 4, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out
index 6deec6f9f..a93c23ab2 100644
--- a/src/test/regress/expected/multi_utilities.out
+++ b/src/test/regress/expected/multi_utilities.out
@@ -2,15 +2,15 @@
 -- test utility statement functionality
 -- ===================================================================
 CREATE TABLE sharded_table ( name text, id bigint );
-SELECT master_create_distributed_table('sharded_table', 'id', 'hash');
- master_create_distributed_table
----------------------------------
+SELECT create_distributed_table('sharded_table', 'id', 'hash');
+ create_distributed_table
+--------------------------
 
 (1 row)
 
-SELECT master_create_worker_shards('sharded_table', 2, 1);
- master_create_worker_shards
------------------------------
+SELECT create_worker_shards('sharded_table', 2, 1);
+ create_worker_shards
+----------------------
 
 (1 row)
 
@@ -66,14 +66,14 @@ EXECUTE sharded_query;
 (0 rows)
 
 -- try to drop shards with where clause
-SELECT master_apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
+SELECT apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
 ERROR: cannot delete from distributed table
 DETAIL: Delete statements on hash-partitioned tables with where clause is not supported
 -- drop all shards
-SELECT master_apply_delete_command('DELETE FROM sharded_table');
- master_apply_delete_command
------------------------------
-                           2
+SELECT apply_delete_command('DELETE FROM sharded_table');
+ apply_delete_command
+----------------------
+                    2
 (1 row)
 
 -- drop table
diff --git a/src/test/regress/input/multi_agg_distinct.source b/src/test/regress/input/multi_agg_distinct.source
index 69e431f5a..92e24ba38 100644
--- a/src/test/regress/input/multi_agg_distinct.source
+++ b/src/test/regress/input/multi_agg_distinct.source
@@ -21,7 +21,7 @@ CREATE TABLE lineitem_range (
 	l_shipinstruct char(25) not null,
 	l_shipmode char(10) not null,
 	l_comment varchar(44) not null );
-SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range');
+SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range');
 
 \STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
 \STAGE lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'
diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source
index 65e4375be..6a9044fbb 100644
--- a/src/test/regress/input/multi_agg_type_conversion.source
+++ b/src/test/regress/input/multi_agg_type_conversion.source
@@ -18,7 +18,7 @@ CREATE TABLE aggregate_type (
 	float_value float(20) not null,
 	double_value float(40) not null,
 	interval_value interval not null);
-SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append');
+SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
 
 \STAGE aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
 
diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source
index 2b46cbc39..f6bbe5014 100644
--- a/src/test/regress/input/multi_alter_table_statements.source
+++ b/src/test/regress/input/multi_alter_table_statements.source
@@ -25,7 +25,7 @@ CREATE TABLE lineitem_alter (
 	l_shipmode char(10) not null,
 	l_comment varchar(44) not null );
-SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 
'append'); +SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); \STAGE lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that we can add columns @@ -43,7 +43,7 @@ FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; -\c - - - :master_port +\c - - - :port \d lineitem_alter SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; @@ -160,12 +160,12 @@ FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; -\c - - - :master_port +\c - - - :port -- Cleanup the table and its shards -SELECT master_apply_delete_command('DELETE FROM lineitem_alter'); +SELECT apply_delete_command('DELETE FROM lineitem_alter'); DROP TABLE lineitem_alter; -- check that nothing's left over on workers \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; -\c - - - :master_port +\c - - - :port diff --git a/src/test/regress/input/multi_append_table_to_shard.source b/src/test/regress/input/multi_append_table_to_shard.source index 04ff276d4..e90bde47e 100644 --- a/src/test/regress/input/multi_append_table_to_shard.source +++ b/src/test/regress/input/multi_append_table_to_shard.source @@ -7,14 +7,14 @@ CREATE TABLE multi_append_table_to_shard_right right_number INTEGER not null, right_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append'); +SELECT create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append'); CREATE TABLE multi_append_table_to_shard_left ( left_number INTEGER not null, left_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); +SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); -- Replicate 'left' table on both workers SELECT set_config('citus.shard_replication_factor', '2', false); @@ -43,7 +43,7 @@ CREATE TABLE multi_append_table_to_shard_stage COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data'; -SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) +SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; @@ -58,7 +58,7 @@ WHERE left_number = right_number; DELETE FROM multi_append_table_to_shard_stage; COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'; -SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) +SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; @@ -74,9 +74,9 @@ WHERE left_number = right_number; UPDATE pg_dist_partition SET partmethod = 'h' WHERE logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid; -SELECT master_create_empty_shard('multi_append_table_to_shard_right'); +SELECT create_empty_shard('multi_append_table_to_shard_right'); -SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) +SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 
'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; @@ -85,8 +85,8 @@ UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid; -- Clean up after test -SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right'); -SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_left'); +SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_right'); +SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_left'); DROP TABLE multi_append_table_to_shard_stage; DROP TABLE multi_append_table_to_shard_right; DROP TABLE multi_append_table_to_shard_left; diff --git a/src/test/regress/input/multi_create_schema.source b/src/test/regress/input/multi_create_schema.source index 333188d86..d5b60272e 100644 --- a/src/test/regress/input/multi_create_schema.source +++ b/src/test/regress/input/multi_create_schema.source @@ -4,7 +4,7 @@ CREATE TABLE nation ( n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); -SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); +SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append'); \STAGE tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' diff --git a/src/test/regress/input/multi_master_delete_protocol.source b/src/test/regress/input/multi_master_delete_protocol.source index c98febba4..44c0ce7b1 100644 --- a/src/test/regress/input/multi_master_delete_protocol.source +++ b/src/test/regress/input/multi_master_delete_protocol.source @@ -13,7 +13,7 @@ CREATE TABLE customer_delete_protocol ( c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); -SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); +SELECT create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' @@ -21,33 +21,33 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', -- Check that we don't support conditions on columns other than partition key. -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_acctbal > 0.0'); -- Check that we delete a shard if and only if all rows in the shard satisfy the condition. -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 6500'); SELECT count(*) from customer_delete_protocol; -- Delete one shard that satisfies the given conditions. -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000 AND c_custkey < 3000'); SELECT count(*) from customer_delete_protocol; -- Delete all shards if no condition is provided. 
-SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); +SELECT apply_delete_command('DELETE FROM customer_delete_protocol'); SELECT count(*) FROM customer_delete_protocol; -- Verify that empty shards are deleted if no condition is provided -SELECT master_create_empty_shard('customer_delete_protocol'); -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT create_empty_shard('customer_delete_protocol'); +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000'); -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); +SELECT apply_delete_command('DELETE FROM customer_delete_protocol'); --- Verify that master_apply_delete_command cannot be called in a transaction block +-- Verify that apply_delete_command cannot be called in a transaction block BEGIN; -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); +SELECT apply_delete_command('DELETE FROM customer_delete_protocol'); ROLLBACK; diff --git a/src/test/regress/input/multi_outer_join.source b/src/test/regress/input/multi_outer_join.source index b0d0fce73..3420c9fe9 100644 --- a/src/test/regress/input/multi_outer_join.source +++ b/src/test/regress/input/multi_outer_join.source @@ -13,7 +13,7 @@ CREATE TABLE multi_outer_join_left l_mktsegment char(10) not null, l_comment varchar(117) not null ); -SELECT master_create_distributed_table('multi_outer_join_left', 'l_custkey', 'append'); +SELECT create_distributed_table('multi_outer_join_left', 'l_custkey', 'append'); CREATE TABLE multi_outer_join_right ( @@ -26,7 +26,7 @@ CREATE TABLE multi_outer_join_right r_mktsegment char(10) not null, r_comment varchar(117) not null ); -SELECT master_create_distributed_table('multi_outer_join_right', 'r_custkey', 'append'); +SELECT create_distributed_table('multi_outer_join_right', 'r_custkey', 'append'); CREATE TABLE multi_outer_join_third ( @@ -39,7 +39,7 @@ CREATE TABLE multi_outer_join_third t_mktsegment char(10) not null, t_comment varchar(117) not null ); -SELECT master_create_distributed_table('multi_outer_join_third', 't_custkey', 'append'); +SELECT create_distributed_table('multi_outer_join_third', 't_custkey', 'append'); -- Make sure we do not crash if both tables have no shards SELECT @@ -133,8 +133,8 @@ FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); -- empty tables -SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left'); -SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); +SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_left'); +SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_right'); -- reload shards with 1-1 matching \STAGE multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' diff --git a/src/test/regress/input/multi_stage_data.source b/src/test/regress/input/multi_stage_data.source index 9effe58d5..b0772f88c 100644 --- a/src/test/regress/input/multi_stage_data.source +++ b/src/test/regress/input/multi_stage_data.source @@ -25,6 +25,6 @@ CREATE TABLE nation_hash_partitioned ( n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); -SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash'); +SELECT create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash'); \STAGE nation_hash_partitioned FROM '@abs_srcdir@/data/nation.data' with delimiter '|' diff --git 
a/src/test/regress/input/multi_stage_large_records.source b/src/test/regress/input/multi_stage_large_records.source index 8e015d537..1e8f21dd3 100644 --- a/src/test/regress/input/multi_stage_large_records.source +++ b/src/test/regress/input/multi_stage_large_records.source @@ -9,7 +9,7 @@ SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); -SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); +SELECT create_distributed_table('large_records_table', 'data_id', 'append'); \STAGE large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' diff --git a/src/test/regress/input/multi_subquery.source b/src/test/regress/input/multi_subquery.source index 0accf9135..fe5c505db 100644 --- a/src/test/regress/input/multi_subquery.source +++ b/src/test/regress/input/multi_subquery.source @@ -22,7 +22,7 @@ CREATE TABLE lineitem_subquery ( l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); -SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range'); +SELECT create_distributed_table('lineitem_subquery', 'l_orderkey', 'range'); CREATE TABLE orders_subquery ( o_orderkey bigint not null, @@ -35,7 +35,7 @@ CREATE TABLE orders_subquery ( o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); -SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range'); +SELECT create_distributed_table('orders_subquery', 'o_orderkey', 'range'); SET citus.task_executor_type TO 'task-tracker'; diff --git a/src/test/regress/output/multi_agg_distinct.source b/src/test/regress/output/multi_agg_distinct.source index 060bb39d2..cc8e7b5ce 100644 --- a/src/test/regress/output/multi_agg_distinct.source +++ b/src/test/regress/output/multi_agg_distinct.source @@ -19,9 +19,9 @@ CREATE TABLE lineitem_range ( l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); -SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('lineitem_range', 'l_orderkey', 'range'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source index cb73a98cb..7033f0fcf 100644 --- a/src/test/regress/output/multi_agg_type_conversion.source +++ b/src/test/regress/output/multi_agg_type_conversion.source @@ -33,9 +33,9 @@ CREATE TABLE aggregate_type ( float_value float(20) not null, double_value float(40) not null, interval_value interval not null); -SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('aggregate_type', 'float_value', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 9466c3170..a3f4e9b49 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -23,9 +23,9 @@ CREATE TABLE lineitem_alter ( l_shipmode char(10) not null, l_comment varchar(44) not null ); -SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); - 
master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -74,7 +74,7 @@ ORDER BY attnum; null_column | integer (27 rows) -\c - - - :master_port +\c - - - :port \d lineitem_alter Table "public.lineitem_alter" Column | Type | Modifiers @@ -451,12 +451,12 @@ ORDER BY attnum; ........pg.dropped.23........ | - (29 rows) -\c - - - :master_port +\c - - - :port -- Cleanup the table and its shards -SELECT master_apply_delete_command('DELETE FROM lineitem_alter'); - master_apply_delete_command ------------------------------ - 9 +SELECT apply_delete_command('DELETE FROM lineitem_alter'); + apply_delete_command +---------------------- + 9 (1 row) DROP TABLE lineitem_alter; @@ -467,4 +467,4 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; --------- (0 rows) -\c - - - :master_port +\c - - - :port diff --git a/src/test/regress/output/multi_append_table_to_shard.source b/src/test/regress/output/multi_append_table_to_shard.source index 682671c74..251401373 100644 --- a/src/test/regress/output/multi_append_table_to_shard.source +++ b/src/test/regress/output/multi_append_table_to_shard.source @@ -7,9 +7,9 @@ CREATE TABLE multi_append_table_to_shard_right right_number INTEGER not null, right_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -18,9 +18,9 @@ CREATE TABLE multi_append_table_to_shard_left left_number INTEGER not null, left_text TEXT not null ); -SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -65,13 +65,13 @@ CREATE TABLE multi_append_table_to_shard_stage text TEXT not null ); COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data'; -SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) +SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; - master_append_table_to_shard ------------------------------- - 0.0533333 + append_table_to_shard +----------------------- + 0.0533333 (1 row) -- Only the primary worker will see the new matches, as the secondary still uses a cached shard @@ -87,13 +87,13 @@ WHERE left_number = right_number; -- Now add a lot of data to ensure we increase the size on disk DELETE FROM multi_append_table_to_shard_stage; COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'; -SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) +SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; - master_append_table_to_shard ------------------------------- - 0.106667 + append_table_to_shard +----------------------- + 
0.106667 (1 row) -- This join will refresh the shard on the secondary, all 8 rows in the left table will match twice (16) @@ -109,10 +109,10 @@ WHERE left_number = right_number; -- Check that we error out if we try to append data to a hash partitioned table. UPDATE pg_dist_partition SET partmethod = 'h' WHERE logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid; -SELECT master_create_empty_shard('multi_append_table_to_shard_right'); +SELECT create_empty_shard('multi_append_table_to_shard_right'); ERROR: relation "multi_append_table_to_shard_right" is a hash partitioned table DETAIL: We currently don't support creating shards on hash-partitioned tables -SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) +SELECT append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; @@ -121,16 +121,16 @@ DETAIL: We currently don't support appending to shards in hash-partitioned tabl UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'multi_append_table_to_shard_right'::regclass::oid; -- Clean up after test -SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right'); - master_apply_delete_command ------------------------------ - 1 +SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_right'); + apply_delete_command +---------------------- + 1 (1 row) -SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_left'); - master_apply_delete_command ------------------------------ - 2 +SELECT apply_delete_command('DELETE FROM multi_append_table_to_shard_left'); + apply_delete_command +---------------------- + 2 (1 row) DROP TABLE multi_append_table_to_shard_stage; diff --git a/src/test/regress/output/multi_create_schema.source b/src/test/regress/output/multi_create_schema.source index 690a309d4..7a47026db 100644 --- a/src/test/regress/output/multi_create_schema.source +++ b/src/test/regress/output/multi_create_schema.source @@ -6,9 +6,9 @@ CREATE TABLE nation ( n_comment varchar(152)); NOTICE: Citus partially supports CREATE SCHEMA for distributed databases DETAIL: schema usage in joins and in some UDFs provided by Citus are not supported yet -SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_master_delete_protocol.source b/src/test/regress/output/multi_master_delete_protocol.source index 84fd2579c..60c1d46d9 100644 --- a/src/test/regress/output/multi_master_delete_protocol.source +++ b/src/test/regress/output/multi_master_delete_protocol.source @@ -11,9 +11,9 @@ CREATE TABLE customer_delete_protocol ( c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); -SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -21,16 +21,16 @@ SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with 
delimiter '|' \STAGE customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -- Check that we don't support conditions on columns other than partition key. -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_acctbal > 0.0'); ERROR: cannot delete from distributed table DETAIL: Where clause includes a column other than partition column -- Check that we delete a shard if and only if all rows in the shard satisfy the condition. -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 6500'); - master_apply_delete_command ------------------------------ - 0 + apply_delete_command +---------------------- + 0 (1 row) SELECT count(*) from customer_delete_protocol; @@ -40,11 +40,11 @@ SELECT count(*) from customer_delete_protocol; (1 row) -- Delete one shard that satisfies the given conditions. -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000 AND c_custkey < 3000'); - master_apply_delete_command ------------------------------ - 1 + apply_delete_command +---------------------- + 1 (1 row) SELECT count(*) from customer_delete_protocol; @@ -54,10 +54,10 @@ SELECT count(*) from customer_delete_protocol; (1 row) -- Delete all shards if no condition is provided. -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); - master_apply_delete_command ------------------------------ - 2 +SELECT apply_delete_command('DELETE FROM customer_delete_protocol'); + apply_delete_command +---------------------- + 2 (1 row) SELECT count(*) FROM customer_delete_protocol; @@ -67,27 +67,27 @@ SELECT count(*) FROM customer_delete_protocol; (1 row) -- Verify that empty shards are deleted if no condition is provided -SELECT master_create_empty_shard('customer_delete_protocol'); - master_create_empty_shard ---------------------------- - 102041 +SELECT create_empty_shard('customer_delete_protocol'); + create_empty_shard +-------------------- + 102041 (1 row) -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol +SELECT apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000'); - master_apply_delete_command ------------------------------ - 0 + apply_delete_command +---------------------- + 0 (1 row) -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); - master_apply_delete_command ------------------------------ - 1 +SELECT apply_delete_command('DELETE FROM customer_delete_protocol'); + apply_delete_command +---------------------- + 1 (1 row) --- Verify that master_apply_delete_command cannot be called in a transaction block +-- Verify that apply_delete_command cannot be called in a transaction block BEGIN; -SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); -ERROR: master_apply_delete_command cannot run inside a transaction block +SELECT apply_delete_command('DELETE FROM customer_delete_protocol'); +ERROR: apply_delete_command cannot run inside a transaction block ROLLBACK; diff --git a/src/test/regress/output/multi_outer_join.source b/src/test/regress/output/multi_outer_join.source index 025f258fe..87749c379 100644 --- a/src/test/regress/output/multi_outer_join.source +++ b/src/test/regress/output/multi_outer_join.source @@ -12,9 +12,9 @@ CREATE TABLE multi_outer_join_left 
l_mktsegment char(10) not null, l_comment varchar(117) not null ); -SELECT master_create_distributed_table('multi_outer_join_left', 'l_custkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('multi_outer_join_left', 'l_custkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -29,9 +29,9 @@ CREATE TABLE multi_outer_join_right r_mktsegment char(10) not null, r_comment varchar(117) not null ); -SELECT master_create_distributed_table('multi_outer_join_right', 'r_custkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('multi_outer_join_right', 'r_custkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -46,9 +46,9 @@ CREATE TABLE multi_outer_join_third t_mktsegment char(10) not null, t_comment varchar(117) not null ); -SELECT master_create_distributed_table('multi_outer_join_third', 't_custkey', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('multi_outer_join_third', 't_custkey', 'append'); + create_distributed_table +-------------------------- (1 row) @@ -172,16 +172,16 @@ FROM ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- empty tables -SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left'); - master_apply_delete_command ------------------------------ - 2 +SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_left'); + apply_delete_command +---------------------- + 2 (1 row) -SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); - master_apply_delete_command ------------------------------ - 2 +SELECT * FROM apply_delete_command('DELETE FROM multi_outer_join_right'); + apply_delete_command +---------------------- + 2 (1 row) -- reload shards with 1-1 matching diff --git a/src/test/regress/output/multi_stage_data.source b/src/test/regress/output/multi_stage_data.source index 0eed8965f..73cafcf6f 100644 --- a/src/test/regress/output/multi_stage_data.source +++ b/src/test/regress/output/multi_stage_data.source @@ -19,9 +19,9 @@ CREATE TABLE nation_hash_partitioned ( n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); -SELECT master_create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('nation_hash_partitioned', 'n_nationkey', 'hash'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_stage_large_records.source b/src/test/regress/output/multi_stage_large_records.source index 24eaa14e6..4ba41a1ff 100644 --- a/src/test/regress/output/multi_stage_large_records.source +++ b/src/test/regress/output/multi_stage_large_records.source @@ -6,9 +6,9 @@ -- are creating shards of correct size even when records are large. 
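-- [Editor's note: an illustrative aside, not part of the patch. \STAGE rolls
-- over to a new shard once the current one exceeds citus.shard_max_size, so
-- staging a file larger than 256kB should produce more than one shard; a
-- hypothetical way to verify the resulting shard count would be:
--   SELECT count(*) FROM pg_dist_shard
--   WHERE logicalrelid = 'large_records_table'::regclass;]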
SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); -SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('large_records_table', 'data_id', 'append'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/output/multi_subquery.source b/src/test/regress/output/multi_subquery.source index f0e6f0250..4e919af2b 100644 --- a/src/test/regress/output/multi_subquery.source +++ b/src/test/regress/output/multi_subquery.source @@ -20,9 +20,9 @@ CREATE TABLE lineitem_subquery ( l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); -SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('lineitem_subquery', 'l_orderkey', 'range'); + create_distributed_table +-------------------------- (1 row) @@ -37,9 +37,9 @@ CREATE TABLE orders_subquery ( o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); -SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range'); - master_create_distributed_table ---------------------------------- +SELECT create_distributed_table('orders_subquery', 'o_orderkey', 'range'); + create_distributed_table +-------------------------- (1 row) diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 1e80826a2..99af8a1c8 100644 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -133,7 +133,7 @@ sysopen my $fh, "tmp_check/tmp-bin/psql", O_CREAT|O_TRUNC|O_RDWR, 0700 or die "Could not create psql wrapper"; print $fh "#!/bin/bash\n"; print $fh "exec $bindir/csql "; -print $fh "--variable=master_port=$masterPort "; +print $fh "--variable=port=$masterPort "; for my $workeroff (0 .. 
$#workerPorts) { my $port = $workerPorts[$workeroff]; diff --git a/src/test/regress/sql/multi_binary_master_copy_format.sql b/src/test/regress/sql/multi_binary_master_copy_format.sql index 4c67eb2d2..354a9f504 100644 --- a/src/test/regress/sql/multi_binary_master_copy_format.sql +++ b/src/test/regress/sql/multi_binary_master_copy_format.sql @@ -4,7 +4,7 @@ -- Try binary master copy for different executors -SET citus.binary_master_copy_format TO 'on'; +SET citus.binary_copy_format TO 'on'; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM lineitem; diff --git a/src/test/regress/sql/multi_create_insert_proxy.sql b/src/test/regress/sql/multi_create_insert_proxy.sql index bfd3adcbd..a0d6f25ad 100644 --- a/src/test/regress/sql/multi_create_insert_proxy.sql +++ b/src/test/regress/sql/multi_create_insert_proxy.sql @@ -47,8 +47,8 @@ CREATE TABLE insert_target ( -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; -SELECT master_create_distributed_table('insert_target', 'id', 'hash'); -SELECT master_create_worker_shards('insert_target', 2, 1); +SELECT create_distributed_table('insert_target', 'id', 'hash'); +SELECT create_worker_shards('insert_target', 2, 1); CREATE TEMPORARY SEQUENCE rows_inserted; SELECT create_insert_proxy_for_table('insert_target', 'rows_inserted') AS proxy_tablename diff --git a/src/test/regress/sql/multi_create_shards.sql b/src/test/regress/sql/multi_create_shards.sql index 1180338ac..60254a705 100644 --- a/src/test/regress/sql/multi_create_shards.sql +++ b/src/test/regress/sql/multi_create_shards.sql @@ -48,36 +48,36 @@ CREATE TABLE table_to_distribute ( ); -- use an index instead of table name -SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash'); +SELECT create_distributed_table('table_to_distribute_pkey', 'id', 'hash'); -- use a bad column name -SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'bad_column', 'hash'); -- use unrecognized partition type -SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized'); +SELECT create_distributed_table('table_to_distribute', 'name', 'unrecognized'); -- use a partition column of a type lacking any default operator class -SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash'); -- use a partition column of type lacking the required support function (hash) -SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash'); -- distribute table and inspect side effects -SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash'); +SELECT create_distributed_table('table_to_distribute', 'name', 'hash'); SELECT partmethod, partkey FROM pg_dist_partition WHERE logicalrelid = 'table_to_distribute'::regclass; -- use a bad shard count -SELECT master_create_worker_shards('table_to_distribute', 0, 1); +SELECT create_worker_shards('table_to_distribute', 0, 1); -- use a bad replication factor -SELECT master_create_worker_shards('table_to_distribute', 16, 0); +SELECT create_worker_shards('table_to_distribute', 16, 0); -- use a replication factor higher than shard count -SELECT master_create_worker_shards('table_to_distribute', 16, 3); +SELECT create_worker_shards('table_to_distribute', 16, 3); -- finally, create shards and inspect 
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
+SELECT create_worker_shards('table_to_distribute', 16, 1);
 SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard
 WHERE logicalrelid = 'table_to_distribute'::regclass
@@ -93,7 +93,7 @@ SELECT count(*) AS shard_count,
 SELECT COUNT(*) FROM pg_class
 WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r';
 
 -- try to create them again
-SELECT master_create_worker_shards('table_to_distribute', 16, 1);
+SELECT create_worker_shards('table_to_distribute', 16, 1);
 
 -- test list sorting
 SELECT sort_names('sumedh', 'jason', 'ozgun');
@@ -108,8 +108,8 @@ CREATE FOREIGN TABLE foreign_table_to_distribute
 )
 SERVER fake_fdw_server;
 
-SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
-SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1);
+SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
+SELECT create_worker_shards('foreign_table_to_distribute', 16, 1);
 
 SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard
 WHERE logicalrelid = 'foreign_table_to_distribute'::regclass
@@ -122,8 +122,8 @@ CREATE TABLE weird_shard_count
     id bigint
 );
 
-SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash');
-SELECT master_create_worker_shards('weird_shard_count', 7, 1);
+SELECT create_distributed_table('weird_shard_count', 'id', 'hash');
+SELECT create_worker_shards('weird_shard_count', 7, 1);
 
 -- Citus ensures all shards are roughly the same size
 SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size
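
For reviewers trying the renamed API by hand, the hash-distribution flow exercised above
reduces to the following sketch (table name and counts illustrative, not part of the test
suite):

    CREATE TABLE events (id bigint, payload text);
    SELECT create_distributed_table('events', 'id', 'hash');
    -- 16 shards, each with replication factor 1
    SELECT create_worker_shards('events', 16, 1);
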
diff --git a/src/test/regress/sql/multi_create_table.sql b/src/test/regress/sql/multi_create_table.sql
index 3509436f6..76710fd8a 100644
--- a/src/test/regress/sql/multi_create_table.sql
+++ b/src/test/regress/sql/multi_create_table.sql
@@ -23,7 +23,7 @@ CREATE TABLE lineitem (
     l_shipmode char(10) not null,
     l_comment varchar(44) not null,
     PRIMARY KEY(l_orderkey, l_linenumber) );
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
 
 CREATE INDEX lineitem_time_index ON lineitem (l_shipdate);
 
@@ -38,7 +38,7 @@ CREATE TABLE orders (
     o_shippriority integer not null,
     o_comment varchar(79) not null,
     PRIMARY KEY(o_orderkey) );
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');
 
 CREATE TABLE customer (
     c_custkey integer not null,
@@ -49,14 +49,14 @@ CREATE TABLE customer (
     c_acctbal decimal(15,2) not null,
     c_mktsegment char(10) not null,
     c_comment varchar(117) not null);
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
+SELECT create_distributed_table('customer', 'c_custkey', 'append');
 
 CREATE TABLE nation (
     n_nationkey integer not null,
     n_name char(25) not null,
     n_regionkey integer not null,
     n_comment varchar(152));
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');
 
 CREATE TABLE part (
     p_partkey integer not null,
@@ -68,7 +68,7 @@ CREATE TABLE part (
     p_container char(10) not null,
     p_retailprice decimal(15,2) not null,
     p_comment varchar(23) not null);
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
+SELECT create_distributed_table('part', 'p_partkey', 'append');
 
 CREATE TABLE supplier
 (
@@ -80,7 +80,7 @@ CREATE TABLE supplier
     s_acctbal decimal(15,2) not null,
     s_comment varchar(101) not null
 );
-SELECT master_create_distributed_table('supplier', 's_suppkey', 'append');
+SELECT create_distributed_table('supplier', 's_suppkey', 'append');
 
 
 -- now test that Citus cannot distribute unique constraints that do not include
@@ -90,14 +90,14 @@ CREATE TABLE primary_key_on_non_part_col
     partition_col integer,
     other_col integer PRIMARY KEY
 );
-SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
 
 CREATE TABLE unique_const_on_non_part_col
 (
     partition_col integer,
     other_col integer UNIQUE
 );
-SELECT master_create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_non_part_col', 'partition_col', 'hash');
 
 -- now show that Citus can distribute unique constraints that include
 -- the partition column
@@ -106,14 +106,14 @@ CREATE TABLE primary_key_on_part_col
 (
     partition_col integer PRIMARY KEY,
     other_col integer
 );
-SELECT master_create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('primary_key_on_part_col', 'partition_col', 'hash');
 
 CREATE TABLE unique_const_on_part_col
 (
     partition_col integer UNIQUE,
     other_col integer
 );
-SELECT master_create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');
+SELECT create_distributed_table('unique_const_on_part_col', 'partition_col', 'hash');
 
 CREATE TABLE unique_const_on_two_columns
 (
@@ -121,21 +121,21 @@ CREATE TABLE unique_const_on_two_columns
     other_col integer,
     UNIQUE (partition_col, other_col)
 );
-SELECT master_create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');
+SELECT create_distributed_table('unique_const_on_two_columns', 'partition_col', 'hash');
 
 CREATE TABLE unique_const_append_partitioned_tables
 (
     partition_col integer UNIQUE,
     other_col integer
 );
-SELECT master_create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');
+SELECT create_distributed_table('unique_const_append_partitioned_tables', 'partition_col', 'append');
 
 CREATE TABLE unique_const_range_partitioned_tables
 (
     partition_col integer UNIQUE,
     other_col integer
 );
-SELECT master_create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');
+SELECT create_distributed_table('unique_const_range_partitioned_tables', 'partition_col', 'range');
 
 -- drop unnecessary tables
 DROP TABLE primary_key_on_non_part_col, unique_const_on_non_part_col CASCADE;
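
As the constraint tests above encode, a UNIQUE or PRIMARY KEY constraint on a hash- or
range-partitioned table is only accepted when it covers the partition column; a condensed
sketch with hypothetical tables:

    CREATE TABLE ok_example  (partition_col int PRIMARY KEY, other_col int);
    SELECT create_distributed_table('ok_example', 'partition_col', 'hash');   -- accepted

    CREATE TABLE bad_example (partition_col int, other_col int PRIMARY KEY);
    SELECT create_distributed_table('bad_example', 'partition_col', 'hash');  -- rejected
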
diff --git a/src/test/regress/sql/multi_data_types.sql b/src/test/regress/sql/multi_data_types.sql
index 68b84044f..a7072a2a2 100644
--- a/src/test/regress/sql/multi_data_types.sql
+++ b/src/test/regress/sql/multi_data_types.sql
@@ -54,9 +54,9 @@ CREATE TABLE composite_type_partitioned_table
     col test_composite_type
 );
 
-SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
+SELECT create_distributed_table('composite_type_partitioned_table', 'col', 'hash');
 
-SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1);
+SELECT create_worker_shards('composite_type_partitioned_table', 4, 1);
 
 -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table
 INSERT INTO composite_type_partitioned_table VALUES  (1, '(1, 2)'::test_composite_type);
@@ -80,9 +80,9 @@ CREATE TABLE bugs (
     status bug_status
 );
 
-SELECT master_create_distributed_table('bugs', 'status', 'hash');
+SELECT create_distributed_table('bugs', 'status', 'hash');
 
-SELECT master_create_worker_shards('bugs', 4, 1);
+SELECT create_worker_shards('bugs', 4, 1);
 
 -- execute INSERT, SELECT and UPDATE queries on bugs
 INSERT INTO bugs VALUES (1, 'new');
@@ -104,8 +104,8 @@ CREATE TABLE varchar_hash_partitioned_table
     name varchar
 );
 
-SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
-SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1);
+SELECT create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash');
+SELECT create_worker_shards('varchar_hash_partitioned_table', 4, 1);
 
 -- execute INSERT, SELECT and UPDATE queries on varchar_hash_partitioned_table
 INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason');
diff --git a/src/test/regress/sql/multi_fdw_create_table.sql b/src/test/regress/sql/multi_fdw_create_table.sql
index e0ccd9a3e..457716117 100644
--- a/src/test/regress/sql/multi_fdw_create_table.sql
+++ b/src/test/regress/sql/multi_fdw_create_table.sql
@@ -28,7 +28,7 @@ CREATE FOREIGN TABLE lineitem (
     l_comment varchar(44) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'append');
 
 CREATE FOREIGN TABLE orders (
     o_orderkey bigint not null,
@@ -42,7 +42,7 @@ CREATE FOREIGN TABLE orders (
     o_comment varchar(79) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('orders', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders', 'o_orderkey', 'append');
 
 CREATE FOREIGN TABLE customer (
     c_custkey integer not null,
@@ -55,7 +55,7 @@ CREATE FOREIGN TABLE customer (
     c_comment varchar(117) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('customer', 'c_custkey', 'append');
+SELECT create_distributed_table('customer', 'c_custkey', 'append');
 
 CREATE FOREIGN TABLE nation (
     n_nationkey integer not null,
@@ -64,7 +64,7 @@ CREATE FOREIGN TABLE nation (
     n_comment varchar(152))
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('nation', 'n_nationkey', 'append');
+SELECT create_distributed_table('nation', 'n_nationkey', 'append');
 
 CREATE FOREIGN TABLE part (
     p_partkey integer not null,
@@ -78,4 +78,4 @@ CREATE FOREIGN TABLE part (
     p_comment varchar(23) not null)
 SERVER file_server
 OPTIONS (format 'text', filename '', delimiter '|', null '');
-SELECT master_create_distributed_table('part', 'p_partkey', 'append');
+SELECT create_distributed_table('part', 'p_partkey', 'append');
diff --git a/src/test/regress/sql/multi_fdw_master_protocol.sql b/src/test/regress/sql/multi_fdw_master_protocol.sql
index dfd495538..c207a65dc 100644
--- a/src/test/regress/sql/multi_fdw_master_protocol.sql
+++ b/src/test/regress/sql/multi_fdw_master_protocol.sql
@@ -5,10 +5,10 @@
 
 -- Tests that check the metadata returned by the master node.
 
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-       part_placement_policy FROM master_get_table_metadata('lineitem');
+       part_placement_policy FROM get_table_metadata('lineitem');
 
-SELECT * FROM master_get_table_ddl_events('lineitem');
+SELECT * FROM get_table_ddl_events('lineitem');
 
-SELECT * FROM master_get_new_shardid();
+SELECT * FROM get_new_shardid();
 
-SELECT node_name FROM master_get_local_first_candidate_nodes();
+SELECT node_name FROM get_local_first_candidate_nodes();
diff --git a/src/test/regress/sql/multi_hash_pruning.sql b/src/test/regress/sql/multi_hash_pruning.sql
index c70ede12b..b0aaf2e1d 100644
--- a/src/test/regress/sql/multi_hash_pruning.sql
+++ b/src/test/regress/sql/multi_hash_pruning.sql
@@ -19,7 +19,7 @@ CREATE TABLE orders_hash_partitioned (
     o_clerk char(15),
     o_shippriority integer,
     o_comment varchar(79) );
-SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');
+SELECT create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'append');
 
 UPDATE pg_dist_partition SET partmethod = 'h'
 WHERE logicalrelid = 'orders_hash_partitioned'::regclass;
diff --git a/src/test/regress/sql/multi_index_statements.sql b/src/test/regress/sql/multi_index_statements.sql
index 942680da6..84ba39c3a 100644
--- a/src/test/regress/sql/multi_index_statements.sql
+++ b/src/test/regress/sql/multi_index_statements.sql
@@ -12,18 +12,18 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102080;
 
 CREATE TABLE index_test_range(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_range', 'a', 'range');
-SELECT master_create_empty_shard('index_test_range');
-SELECT master_create_empty_shard('index_test_range');
+SELECT create_distributed_table('index_test_range', 'a', 'range');
+SELECT create_empty_shard('index_test_range');
+SELECT create_empty_shard('index_test_range');
 
 CREATE TABLE index_test_hash(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_hash', 'a', 'hash');
-SELECT master_create_worker_shards('index_test_hash', 8, 2);
+SELECT create_distributed_table('index_test_hash', 'a', 'hash');
+SELECT create_worker_shards('index_test_hash', 8, 2);
 
 CREATE TABLE index_test_append(a int, b int, c int);
-SELECT master_create_distributed_table('index_test_append', 'a', 'append');
-SELECT master_create_empty_shard('index_test_append');
-SELECT master_create_empty_shard('index_test_append');
+SELECT create_distributed_table('index_test_append', 'a', 'append');
+SELECT create_empty_shard('index_test_append');
+SELECT create_empty_shard('index_test_append');
 
 --
 -- CREATE INDEX
@@ -53,7 +53,7 @@ SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class
 SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
 SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
 SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
-\c - - - :master_port
+\c - - - :port
 
 -- Verify that we error out on unsupported statement types
 
@@ -112,7 +112,7 @@ SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
 \c - - - :worker_1_port
 SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%';
 SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname;
-\c - - - :master_port
+\c - - - :port
 
 -- Drop created tables
 DROP TABLE index_test_range;
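
The index tests above verify DDL propagation by counting shard-level indexes on the
workers; the same spot check can be run by hand after connecting to a worker
(illustrative, following the tests' own pattern):

    \c - - - :worker_1_port
    -- expect one row per shard-local index created by the distributed CREATE INDEX
    SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
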
diff --git a/src/test/regress/sql/multi_master_protocol.sql b/src/test/regress/sql/multi_master_protocol.sql
index 054220e76..41009b4c8 100644
--- a/src/test/regress/sql/multi_master_protocol.sql
+++ b/src/test/regress/sql/multi_master_protocol.sql
@@ -5,16 +5,16 @@
 
 -- Tests that check the metadata returned by the master node.
 
 SELECT part_storage_type, part_key, part_replica_count, part_max_size,
-       part_placement_policy FROM master_get_table_metadata('lineitem');
+       part_placement_policy FROM get_table_metadata('lineitem');
 
-SELECT * FROM master_get_table_ddl_events('lineitem');
+SELECT * FROM get_table_ddl_events('lineitem');
 
-SELECT * FROM master_get_new_shardid();
+SELECT * FROM get_new_shardid();
 
-SELECT * FROM master_get_local_first_candidate_nodes();
+SELECT * FROM get_local_first_candidate_nodes();
 
-SELECT * FROM master_get_round_robin_candidate_nodes(1);
+SELECT * FROM get_round_robin_candidate_nodes(1);
 
-SELECT * FROM master_get_round_robin_candidate_nodes(2);
+SELECT * FROM get_round_robin_candidate_nodes(2);
 
-SELECT * FROM master_get_active_worker_nodes();
+SELECT * FROM get_active_worker_nodes();
diff --git a/src/test/regress/sql/multi_modifications.sql b/src/test/regress/sql/multi_modifications.sql
index dc439f017..5e1bd1522 100644
--- a/src/test/regress/sql/multi_modifications.sql
+++ b/src/test/regress/sql/multi_modifications.sql
@@ -17,36 +17,36 @@ CREATE TABLE insufficient_shards ( LIKE limit_orders );
 CREATE TABLE range_partitioned ( LIKE limit_orders );
 CREATE TABLE append_partitioned ( LIKE limit_orders );
 
-SELECT master_create_distributed_table('limit_orders', 'id', 'hash');
-SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash');
-SELECT master_create_distributed_table('range_partitioned', 'id', 'range');
-SELECT master_create_distributed_table('append_partitioned', 'id', 'append');
+SELECT create_distributed_table('limit_orders', 'id', 'hash');
+SELECT create_distributed_table('insufficient_shards', 'id', 'hash');
+SELECT create_distributed_table('range_partitioned', 'id', 'range');
+SELECT create_distributed_table('append_partitioned', 'id', 'append');
 
-SELECT master_create_worker_shards('limit_orders', 2, 2);
+SELECT create_worker_shards('limit_orders', 2, 2);
 
 -- make a single shard that covers no partition values
-SELECT master_create_worker_shards('insufficient_shards', 1, 1);
+SELECT create_worker_shards('insufficient_shards', 1, 1);
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0
 WHERE logicalrelid = 'insufficient_shards'::regclass;
 
 -- create range-partitioned shards
-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 49999
 WHERE shardid = :new_shard_id;
 
-SELECT master_create_empty_shard('range_partitioned') AS new_shard_id
+SELECT create_empty_shard('range_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 50000, shardmaxvalue = 99999
 WHERE shardid = :new_shard_id;
 
 -- create append-partitioned shards
-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000
 WHERE shardid = :new_shard_id;
 
-SELECT master_create_empty_shard('append_partitioned') AS new_shard_id
+SELECT create_empty_shard('append_partitioned') AS new_shard_id
 \gset
 UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000
 WHERE shardid = :new_shard_id;
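
A note on the \gset idiom used throughout this hunk: it sends the preceding query and
stores each output column of its single-row result into a like-named psql variable, which
is how the tests pin explicit bounds onto freshly created shards. A standalone sketch
(table name and bounds hypothetical):

    SELECT create_empty_shard('my_append_table') AS new_shard_id
    \gset
    -- :new_shard_id now holds the shardid returned above
    UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 9999
    WHERE shardid = :new_shard_id;
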
diff --git a/src/test/regress/sql/multi_partition_pruning.sql b/src/test/regress/sql/multi_partition_pruning.sql
index d3c407bf9..85035f9ef 100644
--- a/src/test/regress/sql/multi_partition_pruning.sql
+++ b/src/test/regress/sql/multi_partition_pruning.sql
@@ -36,7 +36,7 @@ CREATE TABLE varchar_partitioned_table
 (
     varchar_column varchar(100)
 );
-SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
+SELECT create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append');
 
 -- Create logical shards and shard placements with shardid 100,101
 
@@ -65,7 +65,7 @@ CREATE TABLE array_partitioned_table
 (
     array_column text[]
 );
-SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append');
+SELECT create_distributed_table('array_partitioned_table', 'array_column', 'append');
 SET client_min_messages TO DEBUG2;
 
 -- Create logical shard with shardid 102, 103
@@ -103,7 +103,7 @@ CREATE TABLE composite_partitioned_table
 (
     composite_column composite_type
 );
-SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
+SELECT create_distributed_table('composite_partitioned_table', 'composite_column', 'append');
 SET client_min_messages TO DEBUG2;
 
 -- Create logical shard with shardid 104, 105
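
The pruning tests above rely on DEBUG2 output to show which shards survive pruning; to
observe the same messages interactively, something along these lines should work (filter
value illustrative):

    SET client_min_messages TO DEBUG2;
    SELECT count(*) FROM varchar_partitioned_table
    WHERE varchar_column = 'some_value';
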
diff --git a/src/test/regress/sql/multi_repair_shards.sql b/src/test/regress/sql/multi_repair_shards.sql
index 7912f6a34..d2c29ddec 100644
--- a/src/test/regress/sql/multi_repair_shards.sql
+++ b/src/test/regress/sql/multi_repair_shards.sql
@@ -11,10 +11,10 @@ CREATE INDEX ON customer_engagements (created_at);
 CREATE INDEX ON customer_engagements (event_data);
 
 -- distribute the table
-SELECT master_create_distributed_table('customer_engagements', 'id', 'hash');
+SELECT create_distributed_table('customer_engagements', 'id', 'hash');
 
 -- create a single shard on the first worker
-SELECT master_create_worker_shards('customer_engagements', 1, 2);
+SELECT create_worker_shards('customer_engagements', 1, 2);
 
 -- ingest some data for the tests
 INSERT INTO customer_engagements VALUES (1, '01-01-2015', 'first event');
@@ -25,7 +25,7 @@ INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event');
 
 -- (i) create a new shard
 -- (ii) mark the second shard placements as unhealthy
 -- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones
--- (iv) do a successful master_copy_shard_placement from the first placement to the second
+-- (iv) do a successful copy_shard_placement from the first placement to the second
 -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement
 
 -- get the newshardid
@@ -39,13 +39,13 @@ UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :newshardid AN
 INSERT INTO pg_dist_shard_placement (nodename, nodeport, shardid, shardstate, shardlength)
     VALUES ('dummyhost', :worker_2_port, :newshardid, 1, 0);
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'dummyhost', :worker_2_port);
 
 -- also try to copy from an inactive placement
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port);
 
 -- "copy" this shard from the first placement to the second one
-SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
 
 -- now, update first placement as unhealthy (and raise a notice) so that queries are not routed there
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :newshardid AND nodeport = :worker_1_port;
@@ -61,10 +61,10 @@ CREATE FOREIGN TABLE remote_engagements (
 ) SERVER fake_fdw_server;
 
 -- distribute the table
-SELECT master_create_distributed_table('remote_engagements', 'id', 'hash');
+SELECT create_distributed_table('remote_engagements', 'id', 'hash');
 
 -- create a single shard on the first worker
-SELECT master_create_worker_shards('remote_engagements', 1, 2);
+SELECT create_worker_shards('remote_engagements', 1, 2);
 
 -- get the newshardid
 SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remote_engagements'::regclass
@@ -74,4 +74,4 @@ SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remo
 UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND nodeport = :worker_2_port;
 
 -- oops! we don't support repairing shards backed by foreign tables
-SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
+SELECT copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port);
diff --git a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql
index a245cd420..1824958c9 100644
--- a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql
+++ b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql
@@ -3,7 +3,7 @@
 --
 
 -- Create UDF in master and workers
-\c - - - :master_port
+\c - - - :port
 DROP FUNCTION IF EXISTS median(double precision[]);
 
 CREATE FUNCTION median(double precision[])
 RETURNS double precision
@@ -37,7 +37,7 @@ LANGUAGE sql IMMUTABLE AS $_$
 $_$;
 
 -- Run query on master
-\c - - - :master_port
+\c - - - :port
 SET citus.task_executor_type TO 'task-tracker';
diff --git a/src/test/regress/sql/multi_simple_queries.sql b/src/test/regress/sql/multi_simple_queries.sql
index 45fc1ebf2..b09e23eb3 100644
--- a/src/test/regress/sql/multi_simple_queries.sql
+++ b/src/test/regress/sql/multi_simple_queries.sql
@@ -15,15 +15,15 @@ CREATE TABLE authors ( name text, id bigint );
 
 -- this table is used in router executor tests
 CREATE TABLE articles_single_shard (LIKE articles);
 
-SELECT master_create_distributed_table('articles', 'author_id', 'hash');
-SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash');
+SELECT create_distributed_table('articles', 'author_id', 'hash');
+SELECT create_distributed_table('articles_single_shard', 'author_id', 'hash');
 
 -- test when a table is distributed but no shards created yet
 SELECT count(*) from articles;
 
-SELECT master_create_worker_shards('articles', 2, 1);
-SELECT master_create_worker_shards('articles_single_shard', 1, 1);
+SELECT create_worker_shards('articles', 2, 1);
+SELECT create_worker_shards('articles_single_shard', 1, 1);
 
 -- create a bunch of test data
 INSERT INTO articles VALUES ( 1,  1, 'arsenous', 9572);
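
The router executor cases set up above hinge on queries whose rows all map to a single
shard; with articles hash-distributed on author_id, an equality filter on that column is
routable to one placement (filter value illustrative):

    SELECT count(*) FROM articles WHERE author_id = 1;
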
diff --git a/src/test/regress/sql/multi_table_ddl.sql b/src/test/regress/sql/multi_table_ddl.sql
index b5eb44bcf..211ddd1d0 100644
--- a/src/test/regress/sql/multi_table_ddl.sql
+++ b/src/test/regress/sql/multi_table_ddl.sql
@@ -4,7 +4,7 @@
 -- Tests around changing the schema and dropping of a distributed table
 
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
 
 -- verify that the citus extension can't be dropped while distributed tables exist
 DROP EXTENSION citus;
@@ -25,8 +25,8 @@ DROP TABLE testtableddl;
 
 -- verify that the table can be dropped even if shards exist
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
-SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
-SELECT 1 FROM master_create_empty_shard('testtableddl');
+SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
+SELECT 1 FROM create_empty_shard('testtableddl');
 
 DROP TABLE testtableddl;
 
 -- ensure no metadata of distributed tables are remaining
diff --git a/src/test/regress/sql/multi_task_assignment_policy.sql b/src/test/regress/sql/multi_task_assignment_policy.sql
index 5ee4cddfc..85b0b7e5e 100644
--- a/src/test/regress/sql/multi_task_assignment_policy.sql
+++ b/src/test/regress/sql/multi_task_assignment_policy.sql
@@ -8,7 +8,7 @@
 -- and check that tasks are assigned to worker nodes as expected.
 
 CREATE TABLE task_assignment_test_table (test_id integer);
-SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append');
+SELECT create_distributed_table('task_assignment_test_table', 'test_id', 'append');
 
 -- Create logical shards with shardids 200, 201, and 202
 
diff --git a/src/test/regress/sql/multi_upsert.sql b/src/test/regress/sql/multi_upsert.sql
index b61833eaf..28aa8897c 100644
--- a/src/test/regress/sql/multi_upsert.sql
+++ b/src/test/regress/sql/multi_upsert.sql
@@ -10,8 +10,8 @@ CREATE TABLE upsert_test
 );
 
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test', '4', '2');
+SELECT create_distributed_table('upsert_test', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test', '4', '2');
 
 -- do a regular insert
 INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1);
@@ -90,8 +90,8 @@ CREATE TABLE upsert_test_2
 );
 
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_2', '4', '2');
+SELECT create_distributed_table('upsert_test_2', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test_2', '4', '2');
 
 -- now show that Citus works with multiple columns as the PRIMARY KEY, including the partition key
 INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1);
@@ -111,8 +111,8 @@ CREATE TABLE upsert_test_3
 
 CREATE INDEX idx_ups_test ON upsert_test_3(part_key);
 
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_3', '4', '2');
+SELECT create_distributed_table('upsert_test_3', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test_3', '4', '2');
 
 -- since there are no unique indexes, error-out
 INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1;
@@ -125,8 +125,8 @@ CREATE TABLE upsert_test_4
 );
 
 -- distribute the table and create shards
-SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash');
-SELECT master_create_worker_shards('upsert_test_4', '4', '2');
+SELECT create_distributed_table('upsert_test_4', 'part_key', 'hash');
+SELECT create_worker_shards('upsert_test_4', '4', '2');
 
 -- a single row insert
 INSERT INTO upsert_test_4 VALUES (1, 0);
@@ -144,8 +144,8 @@ SELECT * FROM upsert_test_4;
 
 -- now test dropped columns
 CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float);
-SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash');
-SELECT master_create_worker_shards('dropcol_distributed', 4, 1);
+SELECT create_distributed_table('dropcol_distributed', 'key', 'hash');
+SELECT create_worker_shards('dropcol_distributed', 4, 1);
 
 INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key)
     DO UPDATE SET keep1 = dropcol.keep1;
diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql
index a503b2c0d..48c39c830 100644
--- a/src/test/regress/sql/multi_utilities.sql
+++ b/src/test/regress/sql/multi_utilities.sql
@@ -3,8 +3,8 @@
 -- ===================================================================
 
 CREATE TABLE sharded_table ( name text, id bigint );
-SELECT master_create_distributed_table('sharded_table', 'id', 'hash');
-SELECT master_create_worker_shards('sharded_table', 2, 1);
+SELECT create_distributed_table('sharded_table', 'id', 'hash');
+SELECT create_worker_shards('sharded_table', 2, 1);
 
 -- COPY out is supported with distributed tables
 COPY sharded_table TO STDOUT;
@@ -37,10 +37,10 @@ EXECUTE sharded_delete;
 EXECUTE sharded_query;
 
 -- try to drop shards with where clause
-SELECT master_apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
+SELECT apply_delete_command('DELETE FROM sharded_table WHERE id > 0');
 
 -- drop all shards
-SELECT master_apply_delete_command('DELETE FROM sharded_table');
+SELECT apply_delete_command('DELETE FROM sharded_table');
 
 -- drop table
 DROP TABLE sharded_table;
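
Taken together, the renames leave the workflow itself untouched; an end-to-end sketch
against the post-patch API for an append-partitioned table (object names illustrative,
and assuming apply_delete_command keeps its append-table semantics):

    CREATE TABLE staging_events (id bigint, payload text);
    SELECT create_distributed_table('staging_events', 'id', 'append');
    SELECT create_empty_shard('staging_events');
    -- stage data and run queries, then reclaim all shards:
    SELECT apply_delete_command('DELETE FROM staging_events');
    DROP TABLE staging_events;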