Remove remaining master_create_distributed_table usages (#6477)

Co-authored-by: Marco Slot <marco.slot@gmail.com>
Marco Slot 2022-11-04 16:30:06 +01:00 committed by GitHub
parent 666696c01c
commit fcaabfdcf3
51 changed files with 212 additions and 1074 deletions
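
Taken together, the changes below replace the deprecated two-step API (master_create_distributed_table followed by master_create_worker_shards) with a single create_distributed_table call. A minimal migration sketch, mirroring the test updates in this commit (table and column names are illustrative):

-- before: deprecated two-step API, both UDFs now raise an error
SELECT master_create_distributed_table('my_table', 'id', 'hash');
SELECT master_create_worker_shards('my_table', 4, 1);

-- after: single call; the replication factor comes from a GUC
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('my_table', 'id', 'hash', shard_count := 4);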

View File

@ -1255,7 +1255,7 @@ CreateDistributedTableLike(TableConversionState *con)
char partitionMethod = PartitionMethod(con->relationId);
CreateDistributedTable(con->newRelationId, distributionColumnName, partitionMethod,
newShardCount, true, newColocateWith, false);
newShardCount, true, newColocateWith);
}
@ -1273,7 +1273,7 @@ CreateCitusTableLike(TableConversionState *con)
else if (IsCitusTableType(con->relationId, REFERENCE_TABLE))
{
CreateDistributedTable(con->newRelationId, NULL, DISTRIBUTE_BY_NONE, 0, false,
NULL, false);
NULL);
}
else if (IsCitusTableType(con->relationId, CITUS_LOCAL_TABLE))
{

View File

@ -105,8 +105,7 @@ static void CreateDistributedTableConcurrently(Oid relationId,
char *colocateWithTableName,
int shardCount,
bool shardCountIsStrict);
static char DecideReplicationModel(char distributionMethod, char *colocateWithTableName,
bool viaDeprecatedAPI);
static char DecideReplicationModel(char distributionMethod, char *colocateWithTableName);
static List * HashSplitPointsForShardList(List *shardList);
static List * HashSplitPointsForShardCount(int shardCount);
static List * WorkerNodesForShardList(List *shardList);
@ -116,19 +115,16 @@ static void CreateHashDistributedTableShards(Oid relationId, int shardCount,
static uint32 ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
char distributionMethod, char replicationModel,
int shardCount, bool shardCountIsStrict,
char *colocateWithTableName,
bool viaDeprecatedAPI);
char *colocateWithTableName);
static void EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
char distributionMethod, uint32 colocationId,
char replicationModel, bool viaDeprecatedAPI);
char replicationModel);
static void EnsureLocalTableEmpty(Oid relationId);
static void EnsureRelationHasNoTriggers(Oid relationId);
static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId,
int16 supportFunctionNumber);
static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod,
bool viaDeprecatedAPI);
static bool ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod, bool
viaDeprecatedAPI);
static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod);
static bool ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod);
static void EnsureCitusTableCanBeCreated(Oid relationOid);
static void PropagatePrerequisiteObjectsForDistributedTable(Oid relationId);
static void EnsureDistributedSequencesHaveOneType(Oid relationId,
@ -172,34 +168,13 @@ PG_FUNCTION_INFO_V1(create_reference_table);
/*
* master_create_distributed_table accepts a table, distribution column and
* method and performs the corresponding catalog changes.
*
* Note that this UDF is deprecated and cannot create colocated tables, so we
* always use INVALID_COLOCATION_ID.
* master_create_distributed_table is a deprecated predecessor to
* create_distributed_table.
*/
Datum
master_create_distributed_table(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
Oid relationId = PG_GETARG_OID(0);
text *distributionColumnText = PG_GETARG_TEXT_P(1);
Oid distributionMethodOid = PG_GETARG_OID(2);
EnsureCitusTableCanBeCreated(relationId);
char *colocateWithTableName = NULL;
bool viaDeprecatedAPI = true;
char *distributionColumnName = text_to_cstring(distributionColumnText);
Assert(distributionColumnName != NULL);
char distributionMethod = LookupDistributionMethod(distributionMethodOid);
CreateDistributedTable(relationId, distributionColumnName, distributionMethod,
ShardCount, false, colocateWithTableName, viaDeprecatedAPI);
PG_RETURN_VOID();
ereport(ERROR, (errmsg("master_create_distributed_table has been deprecated")));
}
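
With the body reduced to an ereport, any remaining caller fails fast; an illustrative session (the error text is taken verbatim from the ereport above):

SELECT master_create_distributed_table('my_table', 'id', 'hash');
ERROR:  master_create_distributed_table has been deprecated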
@ -217,7 +192,6 @@ create_distributed_table(PG_FUNCTION_ARGS)
{
PG_RETURN_VOID();
}
bool viaDeprecatedAPI = false;
Oid relationId = PG_GETARG_OID(0);
text *distributionColumnText = PG_GETARG_TEXT_P(1);
@ -277,8 +251,7 @@ create_distributed_table(PG_FUNCTION_ARGS)
}
CreateDistributedTable(relationId, distributionColumnName, distributionMethod,
shardCount, shardCountIsStrict, colocateWithTableName,
viaDeprecatedAPI);
shardCount, shardCountIsStrict, colocateWithTableName);
PG_RETURN_VOID();
}
@ -401,10 +374,8 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
EnsureForeignKeysForDistributedTableConcurrently(relationId);
bool viaDeprecatedAPI = false;
char replicationModel = DecideReplicationModel(distributionMethod,
colocateWithTableName,
viaDeprecatedAPI);
colocateWithTableName);
/*
* we fail transaction before local table conversion if the table could not be colocated with
@ -519,7 +490,7 @@ CreateDistributedTableConcurrently(Oid relationId, char *distributionColumnName,
}
EnsureRelationCanBeDistributed(relationId, distributionColumn, distributionMethod,
colocationId, replicationModel, viaDeprecatedAPI);
colocationId, replicationModel);
Oid colocatedTableId = InvalidOid;
if (colocationId != INVALID_COLOCATION_ID)
@ -648,10 +619,8 @@ static void
EnsureColocateWithTableIsValid(Oid relationId, char distributionMethod,
char *distributionColumnName, char *colocateWithTableName)
{
bool viaDeprecatedAPI = false;
char replicationModel = DecideReplicationModel(distributionMethod,
colocateWithTableName,
viaDeprecatedAPI);
colocateWithTableName);
/*
* we fail transaction before local table conversion if the table could not be colocated with
@ -891,8 +860,6 @@ create_reference_table(PG_FUNCTION_ARGS)
char *colocateWithTableName = NULL;
char *distributionColumnName = NULL;
bool viaDeprecatedAPI = false;
EnsureCitusTableCanBeCreated(relationId);
/* enable create_reference_table on an empty node */
@ -926,7 +893,7 @@ create_reference_table(PG_FUNCTION_ARGS)
}
CreateDistributedTable(relationId, distributionColumnName, DISTRIBUTE_BY_NONE,
ShardCount, false, colocateWithTableName, viaDeprecatedAPI);
ShardCount, false, colocateWithTableName);
PG_RETURN_VOID();
}
@ -987,16 +954,11 @@ EnsureRelationExists(Oid relationId)
* safe to distribute the table, this function creates distributed table metadata,
* creates shards and copies local data to shards. This function also handles
partitioned tables by distributing their partitions as well.
*
* viaDeprecatedAPI boolean flag is not optimal way to implement this function,
* but it helps reducing code duplication a lot. We hope to remove that flag one
* day, once we deprecate master_create_distribute_table completely.
*/
void
CreateDistributedTable(Oid relationId, char *distributionColumnName,
char distributionMethod, int shardCount,
bool shardCountIsStrict, char *colocateWithTableName,
bool viaDeprecatedAPI)
bool shardCountIsStrict, char *colocateWithTableName)
{
/*
* EnsureTableNotDistributed errors out when relation is a citus table but
@ -1058,8 +1020,7 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
PropagatePrerequisiteObjectsForDistributedTable(relationId);
char replicationModel = DecideReplicationModel(distributionMethod,
colocateWithTableName,
viaDeprecatedAPI);
colocateWithTableName);
Var *distributionColumn = BuildDistributionKeyFromColumnName(relationId,
distributionColumnName,
@ -1072,11 +1033,10 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
uint32 colocationId = ColocationIdForNewTable(relationId, distributionColumn,
distributionMethod, replicationModel,
shardCount, shardCountIsStrict,
colocateWithTableName,
viaDeprecatedAPI);
colocateWithTableName);
EnsureRelationCanBeDistributed(relationId, distributionColumn, distributionMethod,
colocationId, replicationModel, viaDeprecatedAPI);
colocationId, replicationModel);
/*
* Make sure that existing reference tables have been replicated to all the nodes
@ -1114,16 +1074,6 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
CreateTruncateTrigger(relationId);
}
/*
* If we are using master_create_distributed_table, we don't need to continue,
* because deprecated API does not supports the following features.
*/
if (viaDeprecatedAPI)
{
Assert(colocateWithTableName == NULL);
return;
}
/* create shards for hash distributed and reference tables */
if (distributionMethod == DISTRIBUTE_BY_HASH)
{
@ -1167,7 +1117,7 @@ CreateDistributedTable(Oid relationId, char *distributionColumnName,
{
CreateDistributedTable(partitionRelationId, distributionColumnName,
distributionMethod, shardCount, false,
parentRelationName, viaDeprecatedAPI);
parentRelationName);
}
}
@ -1456,14 +1406,9 @@ DropFKeysRelationInvolvedWithTableType(Oid relationId, int tableTypeFlag)
* used depending on given distribution configuration.
*/
static char
DecideReplicationModel(char distributionMethod, char *colocateWithTableName, bool
viaDeprecatedAPI)
DecideReplicationModel(char distributionMethod, char *colocateWithTableName)
{
if (viaDeprecatedAPI)
{
return REPLICATION_MODEL_COORDINATOR;
}
else if (distributionMethod == DISTRIBUTE_BY_NONE)
if (distributionMethod == DISTRIBUTE_BY_NONE)
{
return REPLICATION_MODEL_2PC;
}
@ -1557,16 +1502,12 @@ static uint32
ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
char distributionMethod, char replicationModel,
int shardCount, bool shardCountIsStrict,
char *colocateWithTableName, bool viaDeprecatedAPI)
char *colocateWithTableName)
{
uint32 colocationId = INVALID_COLOCATION_ID;
if (viaDeprecatedAPI)
{
return colocationId;
}
else if (distributionMethod == DISTRIBUTE_BY_APPEND ||
distributionMethod == DISTRIBUTE_BY_RANGE)
if (distributionMethod == DISTRIBUTE_BY_APPEND ||
distributionMethod == DISTRIBUTE_BY_RANGE)
{
if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) != 0)
{
@ -1660,11 +1601,11 @@ ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
static void
EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
char distributionMethod, uint32 colocationId,
char replicationModel, bool viaDeprecatedAPI)
char replicationModel)
{
Oid parentRelationId = InvalidOid;
EnsureLocalTableEmptyIfNecessary(relationId, distributionMethod, viaDeprecatedAPI);
EnsureLocalTableEmptyIfNecessary(relationId, distributionMethod);
/* user really wants triggers? */
if (EnableUnsafeTriggers)
@ -1782,14 +1723,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
*/
if (PartitionedTableNoLock(relationId))
{
/* we cannot distribute partitioned tables with master_create_distributed_table */
if (viaDeprecatedAPI)
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("distributing partitioned tables in only supported "
"with create_distributed_table UDF")));
}
/* distributing partitioned tables is only supported for hash-distribution */
if (distributionMethod != DISTRIBUTE_BY_HASH)
{
@ -1857,10 +1790,9 @@ ErrorIfTableIsACatalogTable(Relation relation)
* according to ShouldLocalTableBeEmpty but it is not.
*/
static void
EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod,
bool viaDeprecatedAPI)
EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod)
{
if (ShouldLocalTableBeEmpty(relationId, distributionMethod, viaDeprecatedAPI))
if (ShouldLocalTableBeEmpty(relationId, distributionMethod))
{
EnsureLocalTableEmpty(relationId);
}
@ -1876,17 +1808,11 @@ EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod,
* see whether we need to ensure emptiness of the local table.
*/
static bool
ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod,
bool viaDeprecatedAPI)
ShouldLocalTableBeEmpty(Oid relationId, char distributionMethod)
{
bool shouldLocalTableBeEmpty = false;
if (viaDeprecatedAPI)
{
/* we don't support copying local data via deprecated API */
shouldLocalTableBeEmpty = true;
}
else if (distributionMethod != DISTRIBUTE_BY_HASH &&
distributionMethod != DISTRIBUTE_BY_NONE)
if (distributionMethod != DISTRIBUTE_BY_HASH &&
distributionMethod != DISTRIBUTE_BY_NONE)
{
/*
* We only support hash distributed tables and reference tables

View File

@ -391,14 +391,13 @@ PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const
ColumnToColumnName(parentRelationId, (Node *) parentDistributionColumn);
char parentDistributionMethod = DISTRIBUTE_BY_HASH;
char *parentRelationName = generate_qualified_relation_name(parentRelationId);
bool viaDeprecatedAPI = false;
SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(parentRelationId,
relationId);
CreateDistributedTable(relationId, distributionColumnName,
parentDistributionMethod, ShardCount, false,
parentRelationName, viaDeprecatedAPI);
parentRelationName);
}
}
@ -598,14 +597,13 @@ DistributePartitionUsingParent(Oid parentCitusRelationId, Oid partitionRelationI
char distributionMethod = DISTRIBUTE_BY_HASH;
char *parentRelationName = generate_qualified_relation_name(parentCitusRelationId);
bool viaDeprecatedAPI = false;
SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(
parentCitusRelationId, partitionRelationId);
CreateDistributedTable(partitionRelationId, distributionColumnName,
distributionMethod, ShardCount, false,
parentRelationName, viaDeprecatedAPI);
parentRelationName);
}

View File

@ -58,40 +58,13 @@ PG_FUNCTION_INFO_V1(master_create_worker_shards);
/*
* master_create_worker_shards is a user facing function to create worker shards
* for the given relation in round robin order.
* master_create_worker_shards is a deprecated UDF that was used to
* create shards for a hash-distributed table.
*/
Datum
master_create_worker_shards(PG_FUNCTION_ARGS)
{
CheckCitusVersion(ERROR);
EnsureCoordinator();
text *tableNameText = PG_GETARG_TEXT_P(0);
int32 shardCount = PG_GETARG_INT32(1);
int32 replicationFactor = PG_GETARG_INT32(2);
Oid distributedTableId = ResolveRelationId(tableNameText, false);
/* do not add any data */
bool useExclusiveConnections = false;
/*
* distributed tables might have dependencies on different objects, since we create
* shards for a distributed table via multiple sessions these objects will be created
* via their own connection and committed immediately so they become visible to all
* sessions creating shards.
*/
ObjectAddress *tableAddress = palloc0(sizeof(ObjectAddress));
ObjectAddressSet(*tableAddress, RelationRelationId, distributedTableId);
EnsureAllObjectDependenciesExistOnAllNodes(list_make1(tableAddress));
EnsureReferenceTablesExistOnAllNodes();
CreateShardsWithRoundRobinPolicy(distributedTableId, shardCount, replicationFactor,
useExclusiveConnections);
PG_RETURN_VOID();
ereport(ERROR, (errmsg("master_create_worker_shards has been deprecated")));
}
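
Likewise for this stub; an illustrative call:

SELECT master_create_worker_shards('my_table', 4, 2);
ERROR:  master_create_worker_shards has been deprecated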

View File

@ -330,8 +330,7 @@ extern void UpdatePlacementGroupId(uint64 placementId, int groupId);
extern void DeleteShardPlacementRow(uint64 placementId);
extern void CreateDistributedTable(Oid relationId, char *distributionColumnName,
char distributionMethod, int shardCount,
bool shardCountIsStrict, char *colocateWithTableName,
bool viaDeprecatedAPI);
bool shardCountIsStrict, char *colocateWithTableName);
extern void CreateTruncateTrigger(Oid relationId);
extern TableConversionReturn * UndistributeTable(TableConversionParameters *params);

View File

@ -111,8 +111,6 @@ SELECT count(*) FROM history;
-- test we can replicate MX tables
SET citus.shard_replication_factor TO 1;
-- metadata sync will succeed even if we have rep > 1 tables
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0);
SET client_min_messages TO warning;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node

View File

@ -189,8 +189,8 @@ SELECT create_distributed_table_concurrently('table_1', 'id');
SELECT * FROM pg_dist_shard WHERE logicalrelid = 'table_1'::regclass;
logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue
---------------------------------------------------------------------
table_1 | 1880096 | t | -2147483648 | -1
table_1 | 1880097 | t | 0 | 2147483647
table_1 | 1880084 | t | -2147483648 | -1
table_1 | 1880085 | t | 0 | 2147483647
(2 rows)
DROP SCHEMA create_dist_tbl_con CASCADE;

View File

@ -581,130 +581,4 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
DROP TABLE test_table;
DROP SCHEMA failure_create_table;
CREATE SCHEMA failure_create_table;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-- Test master_create_worker_shards with 2pc
CREATE TABLE test_table_2(id int, value_1 int);
SELECT master_create_distributed_table('test_table_2', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Kill connection before sending query to the worker
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('test_table_2', 4, 2);
ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open
SELECT count(*) FROM pg_dist_shard;
count
---------------------------------------------------------------------
0
(1 row)
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT count(*) FROM pg_dist_shard;
count
---------------------------------------------------------------------
0
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(2 rows)
-- Kill the connection after worker sends "PREPARE TRANSACTION" ack
SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('test_table_2', 4, 2);
ERROR: connection not open
CONTEXT: while executing command on localhost:xxxxx
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT count(*) FROM pg_dist_shard;
count
---------------------------------------------------------------------
0
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(2 rows)
-- Cancel the connection after sending prepare transaction in master_create_worker_shards
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('test_table_2', 4, 2);
ERROR: canceling statement due to user request
-- Show that there is no pending transaction
SELECT recover_prepared_transactions();
recover_prepared_transactions
---------------------------------------------------------------------
1
(1 row)
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
SELECT count(*) FROM pg_dist_shard;
count
---------------------------------------------------------------------
0
(1 row)
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
run_command_on_workers
---------------------------------------------------------------------
(localhost,9060,t,0)
(localhost,57637,t,0)
(2 rows)
DROP SCHEMA failure_create_table CASCADE;
NOTICE: drop cascades to table test_table_2
SET search_path TO default;

View File

@ -4,19 +4,6 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13000000;
SET citus.shard_count TO 6;
SET citus.shard_replication_factor TO 1;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
RESET citus.enable_metadata_sync;
-- create distributed tables
CREATE TABLE table1_group1 ( id int PRIMARY KEY);
SELECT create_distributed_table('table1_group1', 'id', 'hash');
@ -41,8 +28,8 @@ SELECT create_distributed_table('table5_groupX', 'id', 'hash');
(1 row)
CREATE TABLE table6_append ( id int );
SELECT master_create_distributed_table('table6_append', 'id', 'append');
master_create_distributed_table
SELECT create_distributed_table('table6_append', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -66,73 +66,43 @@ CREATE FUNCTION find_shard_interval_index(bigint)
-- ===================================================================
-- create distributed table observe shard pruning
CREATE TABLE table1_group1 ( id int );
SELECT master_create_distributed_table('table1_group1', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('table1_group1', 4, 2);
master_create_worker_shards
SELECT create_distributed_table('table1_group1', 'id', 'hash', shard_count := 4, colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE table2_group1 ( id int );
SELECT master_create_distributed_table('table2_group1', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('table2_group1', 4, 2);
master_create_worker_shards
SELECT create_distributed_table('table2_group1', 'id', 'hash', shard_count := 4, colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE table3_group2 ( id int );
SELECT master_create_distributed_table('table3_group2', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('table3_group2', 4, 2);
master_create_worker_shards
SELECT create_distributed_table('table3_group2', 'id', 'hash', shard_count := 4, colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE table4_group2 ( id int );
SELECT master_create_distributed_table('table4_group2', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('table4_group2', 4, 2);
master_create_worker_shards
SELECT create_distributed_table('table4_group2', 'id', 'hash', shard_count := 4, colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE table5_groupX ( id int );
SELECT master_create_distributed_table('table5_groupX', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('table5_groupX', 4, 2);
master_create_worker_shards
SELECT create_distributed_table('table5_groupX', 'id', 'hash', shard_count := 4, colocate_with := 'none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE table6_append ( id int );
SELECT master_create_distributed_table('table6_append', 'id', 'append');
master_create_distributed_table
SELECT create_distributed_table('table6_append', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -166,7 +136,7 @@ SELECT get_table_colocation_id('table1_group1');
SELECT get_table_colocation_id('table5_groupX');
get_table_colocation_id
---------------------------------------------------------------------
0
8
(1 row)
SELECT get_table_colocation_id('table6_append');
@ -352,6 +322,7 @@ SELECT count(*) FROM pg_dist_partition WHERE colocationid IN (4, 5);
0
(1 row)
DROP TABLE table1_group1, table2_group1, table3_group2, table4_group2, table5_groupX, table6_append;
DELETE FROM pg_dist_colocation WHERE colocationid IN (4, 5);
SELECT 1 FROM run_command_on_workers('DELETE FROM pg_dist_colocation WHERE colocationid IN (4, 5)');
?column?
@ -360,6 +331,7 @@ SELECT 1 FROM run_command_on_workers('DELETE FROM pg_dist_colocation WHERE coloc
1
(2 rows)
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4;
SET citus.shard_count = 2;
CREATE TABLE table1_groupA ( id int );
SELECT create_distributed_table('table1_groupA', 'id');
@ -472,16 +444,16 @@ $$);
SELECT logicalrelid, colocationid FROM pg_dist_partition
WHERE colocationid >= 1 AND colocationid < 1000
ORDER BY logicalrelid;
ORDER BY logicalrelid::text;
logicalrelid | colocationid
---------------------------------------------------------------------
table1_groupa | 4
table2_groupa | 4
table1_groupb | 5
table2_groupb | 5
table1_groupc | 6
table2_groupc | 6
table1_groupd | 7
table2_groupa | 4
table2_groupb | 5
table2_groupc | 6
table2_groupd | 7
(8 rows)
@ -726,7 +698,6 @@ ORDER BY
table2;
table1 | table2 | colocated
---------------------------------------------------------------------
table1_group1 | table2_group1 | t
table1_groupb | table2_groupb | t
table1_groupc | table2_groupc | t
table1_groupd | table2_groupd | t
@ -743,7 +714,7 @@ ORDER BY
table1_group_none_1 | table2_group_none_1 | t
table1_group_none_3 | table1_group_default | t
table1_groupf | table2_groupf | t
(17 rows)
(16 rows)
-- check created shards
SELECT
@ -1047,11 +1018,6 @@ SELECT update_distributed_table_colocation('table1_group_none', colocate_with =>
(1 row)
-- activate nodes to get rid of inconsistencies in pg_dist tables
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table1_group1'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table2_group1'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table3_group2'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table4_group2'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table5_groupX'::regclass::oid, 0);
SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
?column?
---------------------------------------------------------------------

View File

@ -202,8 +202,8 @@ CREATE TABLE customer_copy_range (
c_mktsegment char(10),
c_comment varchar(117),
primary key (c_custkey));
SELECT master_create_distributed_table('customer_copy_range', 'c_custkey', 'range');
master_create_distributed_table
SELECT create_distributed_table('customer_copy_range', 'c_custkey', 'range');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -414,14 +414,9 @@ SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.cust
CREATE TABLE "customer_with_special_\\_character"(
c_custkey integer,
c_name varchar(25) not null);
SELECT master_create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('"customer_with_special_\\_character"', 4, 1);
master_create_worker_shards
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash', shard_count := 4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -439,14 +434,9 @@ SELECT count(*) FROM "customer_with_special_\\_character";
CREATE TABLE "1_customer"(
c_custkey integer,
c_name varchar(25) not null);
SELECT master_create_distributed_table('"1_customer"', 'c_custkey', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('"1_customer"', 4, 1);
master_create_worker_shards
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('"1_customer"', 'c_custkey', 'hash', shard_count := 4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -474,14 +464,9 @@ CREATE TABLE packed_numbers_hash (
id integer,
packed_numbers number_pack[]
);
SELECT master_create_distributed_table('packed_numbers_hash', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('packed_numbers_hash', 4, 1);
master_create_worker_shards
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('packed_numbers_hash', 'id', 'hash', shard_count := 4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -500,14 +485,9 @@ CREATE TABLE super_packed_numbers_hash (
id integer,
super_packed_number super_number_pack
);
SELECT master_create_distributed_table('super_packed_numbers_hash', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('super_packed_numbers_hash', 4, 1);
master_create_worker_shards
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('super_packed_numbers_hash', 'id', 'hash', shard_count := 4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -642,14 +622,6 @@ SELECT shardid, nodename, nodeport
(6 rows)
-- add the node back
-- before adding the node, add pg_dist_object entry for tables created with
-- master_create_distributed_table as we don't have the entry for them.
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'objects'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'customer_with_special_\\_character'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, '1_customer'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'packed_numbers_hash'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'super_packed_numbers_hash'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0);
SET client_min_messages TO ERROR;
SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
?column?

View File

@ -59,33 +59,21 @@ DETAIL: Partition column types must have a default operator class defined.
SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
ERROR: could not identify a hash function for type dummy_type
DETAIL: Partition column types must have a hash function defined to use hash partitioning.
-- distribute table and inspect side effects
SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT partmethod, partkey FROM pg_dist_partition
WHERE logicalrelid = 'table_to_distribute'::regclass;
partmethod | partkey
---------------------------------------------------------------------
h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}
(1 row)
-- use a bad shard count
SELECT master_create_worker_shards('table_to_distribute', 0, 1);
ERROR: shard_count must be positive
SELECT create_distributed_table('table_to_distribute', 'name', 'hash', shard_count := 0);
ERROR: 0 is outside the valid range for parameter "shard_count" (1 .. 64000)
-- use a bad replication factor
SELECT master_create_worker_shards('table_to_distribute', 16, 0);
ERROR: replication_factor must be positive
SET citus.shard_replication_factor TO 0;
ERROR: 0 is outside the valid range for parameter "citus.shard_replication_factor" (1 .. 100)
-- use a replication factor higher than shard count
SELECT master_create_worker_shards('table_to_distribute', 16, 3);
SET citus.shard_replication_factor TO 3;
SELECT create_distributed_table('table_to_distribute', 'name', 'hash');
ERROR: replication_factor (3) exceeds number of worker nodes (2)
HINT: Add more worker nodes or try again with a lower replication factor.
RESET citus.shard_replication_factor;
-- finally, create shards and inspect metadata
SELECT master_create_worker_shards('table_to_distribute', 16, 1);
master_create_worker_shards
SELECT create_distributed_table('table_to_distribute', 'name', 'hash', shard_count := 16);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -130,9 +118,6 @@ SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relk
1
(1 row)
-- try to create them again
SELECT master_create_worker_shards('table_to_distribute', 16, 1);
ERROR: table "table_to_distribute" has already had shards created for it
-- test list sorting
SELECT sort_names('sumedh', 'jason', 'ozgun');
sort_names

View File

@ -6,25 +6,6 @@
-- need to cover both reference join and partitioned join, we have created
-- reference and hash-distributed version of orders, customer and part tables.
SET citus.next_shard_id TO 360000;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
CREATE TABLE lineitem (
l_orderkey bigint not null,
l_partkey integer not null,
@ -218,11 +199,6 @@ SELECT create_distributed_table('data_load_test', 'col1', 'range');
ERROR: cannot distribute relation "data_load_test"
DETAIL: Relation "data_load_test" contains data.
HINT: Empty your table before distributing it.
-- table must be empty when using master_create_distributed_table (no shards created)
SELECT master_create_distributed_table('data_load_test', 'col1', 'hash');
ERROR: cannot distribute relation "data_load_test"
DETAIL: Relation "data_load_test" contains data.
HINT: Empty your table before distributing it.
-- create_distributed_table creates shards and copies data into the distributed table
SELECT create_distributed_table('data_load_test', 'col1');
NOTICE: Copying data from local table...
@ -268,19 +244,6 @@ SELECT * FROM no_shard_test WHERE col1 > 1;
---------------------------------------------------------------------
(0 rows)
DROP TABLE no_shard_test;
CREATE TABLE no_shard_test (col1 int, col2 text);
SELECT master_create_distributed_table('no_shard_test', 'col1', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT * FROM no_shard_test WHERE col1 > 1;
col1 | col2
---------------------------------------------------------------------
(0 rows)
DROP TABLE no_shard_test;
-- ensure writes in the same transaction as create_distributed_table are visible
BEGIN;

View File

@ -29,49 +29,6 @@ SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regcl
c
(1 row)
DROP TABLE repmodel_test;
-- test that deprecated api creates distributed tables with coordinator replication
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
RESET citus.shard_replication_factor;
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360025;

View File

@ -81,8 +81,8 @@ CREATE TABLE events_range (
id bigint,
name text
);
SELECT master_create_distributed_table('events_range', 'name', 'range');
master_create_distributed_table
SELECT create_distributed_table('events_range', 'name', 'range');
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -15,7 +15,6 @@ SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
-- change this test every time the previous tests change the set of tables they leave
-- around.
SET client_min_messages TO 'WARNING';
DROP FUNCTION pg_catalog.master_create_worker_shards(text, integer, integer);
DROP EXTENSION citus CASCADE;
RESET client_min_messages;
BEGIN;
@ -131,12 +130,6 @@ SAVEPOINT s4;
ROLLBACK TO SAVEPOINT s3;
ROLLBACK;
CREATE EXTENSION citus;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-- re-add the nodes to the cluster
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
?column?

View File

@ -102,19 +102,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
(29 rows)
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
RESET citus.enable_metadata_sync;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- Create a test table with constraints and SERIAL and default from user defined sequence
CREATE SEQUENCE user_defined_seq;
CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq'));
@ -283,8 +270,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
-- Show that append distributed tables are not included in the activate node snapshot
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append');
master_create_distributed_table
SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -102,19 +102,6 @@ SELECT unnest(activate_node_snapshot()) order by 1;
WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false), ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false), ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
(29 rows)
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
RESET citus.enable_metadata_sync;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- Create a test table with constraints and SERIAL and default from user defined sequence
CREATE SEQUENCE user_defined_seq;
CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq'));
@ -283,8 +270,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
-- Show that append distributed tables are not included in the activate node snapshot
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append');
master_create_distributed_table
SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -463,14 +463,9 @@ CREATE TABLE objects (
id bigint PRIMARY KEY,
name text NOT NULL
);
SELECT master_create_distributed_table('objects', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('objects', 1, 2);
master_create_worker_shards
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('objects', 'id', 'hash', shard_count := 1);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -731,8 +726,8 @@ ORDER BY s.logicalrelid, sp.shardstate;
-- some append-partitioned tests for good measure
CREATE TABLE append_researchers ( LIKE researchers );
SELECT master_create_distributed_table('append_researchers', 'id', 'append');
master_create_distributed_table
SELECT create_distributed_table('append_researchers', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -49,8 +49,8 @@ SELECT * FROM master_get_active_worker_nodes();
RESET ROLE;
-- ensure GRANT/REVOKE's do something sane for creating shards of
CREATE TABLE checkperm(key int);
SELECT master_create_distributed_table('checkperm', 'key', 'append');
master_create_distributed_table
SELECT create_distributed_table('checkperm', 'key', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -154,8 +154,8 @@ SELECT * FROM master_get_table_ddl_events('checkperm');
-- create table as superuser/postgres
CREATE TABLE trivial_postgres (id int);
SELECT master_create_distributed_table('trivial_postgres', 'id', 'append');
master_create_distributed_table
SELECT create_distributed_table('trivial_postgres', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -164,8 +164,8 @@ GRANT ALL ON trivial_postgres TO full_access;
GRANT CREATE ON SCHEMA public TO full_access;
SET ROLE full_access;
CREATE TABLE trivial_full_access (id int);
SELECT master_create_distributed_table('trivial_full_access', 'id', 'append');
master_create_distributed_table
SELECT create_distributed_table('trivial_full_access', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -3,37 +3,12 @@
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;
SET citus.shard_count TO 2;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
-- Verify that a table name > 56 characters gets hashed properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
master_create_worker_shards
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -43,9 +18,10 @@ SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345
List of relations
Schema | Name | Type | Owner
---------------------------------------------------------------------
public | too_long_12345678901234567890123456789012345678901234567890 | table | postgres
public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | postgres
public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres
(2 rows)
(3 rows)
\c - - :master_host :master_port
SET citus.shard_count TO 2;
@ -287,14 +263,8 @@ SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_n
checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100)
(1 row)
SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
master_create_worker_shards
SELECT create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -32,33 +32,15 @@ CREATE TABLE authors_range ( name varchar(20), id bigint );
CREATE TABLE authors_reference ( name varchar(20), id bigint );
-- this table is used in router executor tests
CREATE TABLE articles_single_shard_hash (LIKE articles_hash);
SELECT master_create_distributed_table('articles_hash', 'author_id', 'hash');
master_create_distributed_table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('articles_hash', 'author_id', 'hash', shard_count := 2);
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_distributed_table('articles_single_shard_hash', 'author_id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
-- test when a table is distributed but no shards created yet
SELECT count(*) from articles_hash;
count
---------------------------------------------------------------------
0
(1 row)
SELECT master_create_worker_shards('articles_hash', 2, 1);
master_create_worker_shards
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('articles_single_shard_hash', 1, 1);
master_create_worker_shards
SELECT create_distributed_table('articles_single_shard_hash', 'author_id', 'hash', shard_count := 1);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -388,14 +370,8 @@ DEBUG: Creating router plan
-- recursive CTEs are supported when filtered on partition column
CREATE TABLE company_employees (company_id int, employee_id int, manager_id int);
SELECT master_create_distributed_table('company_employees', 'company_id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('company_employees', 4, 1);
master_create_worker_shards
SELECT create_distributed_table('company_employees', 'company_id', 'hash', shard_count := 4);
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -1761,14 +1737,14 @@ DEBUG: Creating router plan
-- just 4 shards to be created for each table to make sure
-- they are 'co-located' pairwise
SET citus.shard_replication_factor TO 1;
SELECT master_create_distributed_table('authors_range', 'id', 'range');
master_create_distributed_table
SELECT create_distributed_table('authors_range', 'id', 'range');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_distributed_table('articles_range', 'author_id', 'range');
master_create_distributed_table
SELECT create_distributed_table('articles_range', 'author_id', 'range');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -1994,8 +1970,8 @@ DEBUG: Router planner cannot handle multi-shard select queries
-- following is a bug, function should have been
-- evaluated at master before going to worker
-- need to use an append distributed table here
SELECT master_create_distributed_table('articles_append', 'author_id', 'append');
master_create_distributed_table
SELECT create_distributed_table('articles_append', 'author_id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -2041,7 +2017,7 @@ SELECT raise_failed_execution_router($$
LIMIT 1;
$$);
ERROR: Task failed to execute
-- same query on router planner with where false but evaluation left to worker
-- hash-distributed tables can be evaluated on workers since they are synced
SELECT raise_failed_execution_router($$
SELECT author_id FROM articles_single_shard_hash
WHERE
@ -2050,7 +2026,11 @@ SELECT raise_failed_execution_router($$
author_id
LIMIT 1;
$$);
ERROR: Task failed to execute
raise_failed_execution_router
---------------------------------------------------------------------
(1 row)
SELECT raise_failed_execution_router($$
SELECT author_id FROM articles_hash
WHERE
@ -2060,7 +2040,11 @@ SELECT raise_failed_execution_router($$
author_id
LIMIT 1;
$$);
ERROR: Task failed to execute
raise_failed_execution_router
---------------------------------------------------------------------
(1 row)
-- create a dummy function to be used in filtering
CREATE OR REPLACE FUNCTION someDummyFunction(regclass)
RETURNS text AS
@ -2094,7 +2078,11 @@ SELECT raise_failed_execution_router($$
author_id, id
LIMIT 5;
$$);
ERROR: Task failed to execute
raise_failed_execution_router
---------------------------------------------------------------------
(1 row)
\set VERBOSITY DEFAULT
-- temporarily turn off debug messages before dropping the function
SET client_min_messages TO 'NOTICE';
@ -2437,14 +2425,8 @@ SET client_min_messages to 'NOTICE';
-- test that a connection failure marks placements invalid
SET citus.shard_replication_factor TO 2;
CREATE TABLE failure_test (a int, b int);
SELECT master_create_distributed_table('failure_test', 'a', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('failure_test', 2);
master_create_worker_shards
SELECT create_distributed_table('failure_test', 'a', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
@ -2475,9 +2457,13 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement
---------------------------------------------------------------------
840017 | 1 | localhost | 57637
840017 | 1 | localhost | 57638
840018 | 1 | localhost | 57638
840018 | 1 | localhost | 57637
(4 rows)
840018 | 1 | localhost | 57638
840019 | 1 | localhost | 57637
840019 | 1 | localhost | 57638
840020 | 1 | localhost | 57637
840020 | 1 | localhost | 57638
(8 rows)
\c - postgres - :worker_1_port
DROP OWNED BY router_user;

View File

@ -233,21 +233,13 @@ DEBUG: query has a single distribution column value: 1
(0 rows)
CREATE TABLE company_employees (company_id int, employee_id int, manager_id int);
SELECT master_create_distributed_table('company_employees', 'company_id', 'hash');
master_create_distributed_table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('company_employees', 'company_id', 'hash');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- do not print notices from workers since the order is not deterministic
SET client_min_messages TO DEFAULT;
SELECT master_create_worker_shards('company_employees', 4, 1);
master_create_worker_shards
---------------------------------------------------------------------
(1 row)
SET client_min_messages TO 'DEBUG2';
INSERT INTO company_employees values(1, 1, 0);
DEBUG: Creating router plan
DEBUG: query has a single distribution column value: 1

View File

@ -93,7 +93,6 @@ SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'app
SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset
copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid);
-- create shard with master_create_worker_shards
CREATE TABLE test_schema_support.nation_hash(
n_nationkey integer not null,
n_name char(25) not null,

View File

@ -17,26 +17,15 @@ CREATE TABLE articles (
CREATE TABLE authors ( name text, id bigint );
-- this table is used in router executor tests
CREATE TABLE articles_single_shard (LIKE articles);
SELECT master_create_distributed_table('articles', 'author_id', 'hash');
master_create_distributed_table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('articles', 'author_id', 'hash', shard_count := 2);
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('articles', 2, 1);
master_create_worker_shards
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('articles_single_shard', 1, 1);
master_create_worker_shards
SELECT create_distributed_table('articles_single_shard', 'author_id', 'hash', shard_count := 1);
create_distributed_table
---------------------------------------------------------------------
(1 row)

View File

@ -61,30 +61,7 @@ SELECT * FROM mx_table ORDER BY col_1;
-- Try commands from metadata worker
\c - - - :worker_1_port
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
CREATE TABLE mx_table_worker(col_1 text);
-- master_create_distributed_table
SELECT master_create_distributed_table('mx_table_worker', 'col_1', 'hash');
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
-- create_distributed_table
SELECT create_distributed_table('mx_table_worker', 'col_1');
ERROR: operation is not allowed on this node
@ -100,26 +77,6 @@ SELECT count(*) FROM pg_dist_partition WHERE logicalrelid='mx_table_worker'::reg
(1 row)
DROP TABLE mx_table_worker;
-- master_create_worker_shards
CREATE TEMP TABLE pg_dist_shard_temp AS
SELECT * FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass;
DELETE FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass;
SELECT master_create_worker_shards('mx_table', 5, 1);
ERROR: operation is not allowed on this node
HINT: Connect to the coordinator and run it again.
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass;
count
---------------------------------------------------------------------
0
(1 row)
INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp;
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass;
count
---------------------------------------------------------------------
5
(1 row)
\c - - - :master_port
DROP TABLE mx_ref_table;
CREATE UNIQUE INDEX mx_test_uniq_index ON mx_table(col_1);

View File

@ -184,14 +184,9 @@ SELECT create_distributed_table('dustbunnies', 'id', 'hash');
-- add some data to the distributed table
\copy dustbunnies (id, name) from stdin with csv
CREATE TABLE second_dustbunnies(id integer, name text, age integer);
SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
master_create_worker_shards
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('second_dustbunnies', 'id', 'hash', shard_count := 1);
create_distributed_table
---------------------------------------------------------------------
(1 row)
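-- For context (a sketch, not an asserted behavior of this commit):
-- second_dustbunnies feeds the VACUUM/ANALYZE propagation tests, so the
-- conversion keeps the original shape of one shard with two replicas, e.g.
VACUUM second_dustbunnies;
ANALYZE second_dustbunnies;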

View File

@ -327,25 +327,6 @@ CREATE FUNCTION worker_node_responsive(worker_node_name text, worker_node_port i
RETURNS boolean
AS 'citus'
LANGUAGE C STRICT VOLATILE;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
SET citus.next_shard_id TO 123000;
SELECT worker_node_responsive(node_name, node_port::int)
FROM master_get_active_worker_nodes()
@ -2237,8 +2218,8 @@ SELECT public.wait_until_metadata_sync(30000);
(1 row)
CREATE TABLE rebalance_test_table(int_column int);
SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append');
master_create_distributed_table
SELECT create_distributed_table('rebalance_test_table', 'int_column', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
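-- A minimal sketch of the append-distribution pattern used here and in the
-- other converted files: 'append' tables get no shards at creation time, so
-- tests add empty shards explicitly and, where needed, set bounds by hand.
SELECT create_distributed_table('rebalance_test_table', 'int_column', 'append');
SELECT master_create_empty_shard('rebalance_test_table') AS shardid \gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 100
WHERE shardid = :shardid;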

View File

@ -52,25 +52,6 @@ SELECT create_distributed_table('tr', 'pk');
(1 row)
INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
CREATE TABLE t_range(id int, value_1 int);
SELECT create_distributed_table('t_range', 'id', 'range');
create_distributed_table

View File

@ -88,8 +88,6 @@ SELECT count(*) FROM history;
SET citus.shard_replication_factor TO 1;
-- metadata sync will succeed even if we have rep > 1 tables
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'public.articles_single_shard'::regclass::oid, 0);
SET client_min_messages TO warning;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
RESET client_min_messages;

View File

@ -220,59 +220,4 @@ SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables W
DROP TABLE test_table;
DROP SCHEMA failure_create_table;
CREATE SCHEMA failure_create_table;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-- Test master_create_worker_shards with 2PC
CREATE TABLE test_table_2(id int, value_1 int);
SELECT master_create_distributed_table('test_table_2', 'id', 'hash');
-- Kill connection before sending query to the worker
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
SELECT master_create_worker_shards('test_table_2', 4, 2);
SELECT count(*) FROM pg_dist_shard;
SELECT citus.mitmproxy('conn.allow()');
SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
-- Kill the connection after worker sends "PREPARE TRANSACTION" ack
SELECT citus.mitmproxy('conn.onCommandComplete(command="^PREPARE TRANSACTION").kill()');
SELECT master_create_worker_shards('test_table_2', 4, 2);
SELECT citus.mitmproxy('conn.allow()');
SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
-- Cancel the connection after sending prepare transaction in master_create_worker_shards
SELECT citus.mitmproxy('conn.onCommandComplete(command="PREPARE TRANSACTION").cancel(' || pg_backend_pid() || ')');
SELECT master_create_worker_shards('test_table_2', 4, 2);
-- Show that there is no pending transaction
SELECT recover_prepared_transactions();
SELECT citus.mitmproxy('conn.allow()');
SELECT count(*) FROM pg_dist_shard;
SELECT run_command_on_workers($$SELECT count(*) FROM information_schema.tables WHERE table_schema = 'failure_create_table' and table_name LIKE 'test_table%' ORDER BY 1$$);
DROP SCHEMA failure_create_table CASCADE;
SET search_path TO default;
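-- A hypothetical analogue of the removed 2PC failure checks, exercising the
-- one-step API under the same injected failure (sketch only; the table name
-- and mitmproxy patterns reuse the ones deleted above, and this is not part
-- of the test schedule):
CREATE TABLE test_table_2(id int, value_1 int);
SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()');
SELECT create_distributed_table('test_table_2', 'id');
SELECT citus.mitmproxy('conn.allow()');
SELECT count(*) FROM pg_dist_shard;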

View File

@ -7,21 +7,6 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13000000;
SET citus.shard_count TO 6;
SET citus.shard_replication_factor TO 1;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
RESET citus.enable_metadata_sync;
-- create distributed tables
CREATE TABLE table1_group1 ( id int PRIMARY KEY);
SELECT create_distributed_table('table1_group1', 'id', 'hash');
@ -34,7 +19,7 @@ CREATE TABLE table5_groupX ( id int );
SELECT create_distributed_table('table5_groupX', 'id', 'hash');
CREATE TABLE table6_append ( id int );
SELECT master_create_distributed_table('table6_append', 'id', 'append');
SELECT create_distributed_table('table6_append', 'id', 'append');
SELECT master_create_empty_shard('table6_append');
SELECT master_create_empty_shard('table6_append');

View File

@ -72,27 +72,22 @@ CREATE FUNCTION find_shard_interval_index(bigint)
-- create distributed tables and observe shard pruning
CREATE TABLE table1_group1 ( id int );
SELECT master_create_distributed_table('table1_group1', 'id', 'hash');
SELECT master_create_worker_shards('table1_group1', 4, 2);
SELECT create_distributed_table('table1_group1', 'id', 'hash', shard_count := 4, colocate_with := 'none');
CREATE TABLE table2_group1 ( id int );
SELECT master_create_distributed_table('table2_group1', 'id', 'hash');
SELECT master_create_worker_shards('table2_group1', 4, 2);
SELECT create_distributed_table('table2_group1', 'id', 'hash', shard_count := 4, colocate_with := 'none');
CREATE TABLE table3_group2 ( id int );
SELECT master_create_distributed_table('table3_group2', 'id', 'hash');
SELECT master_create_worker_shards('table3_group2', 4, 2);
SELECT create_distributed_table('table3_group2', 'id', 'hash', shard_count := 4, colocate_with := 'none');
CREATE TABLE table4_group2 ( id int );
SELECT master_create_distributed_table('table4_group2', 'id', 'hash');
SELECT master_create_worker_shards('table4_group2', 4, 2);
SELECT create_distributed_table('table4_group2', 'id', 'hash', shard_count := 4, colocate_with := 'none');
CREATE TABLE table5_groupX ( id int );
SELECT master_create_distributed_table('table5_groupX', 'id', 'hash');
SELECT master_create_worker_shards('table5_groupX', 4, 2);
SELECT create_distributed_table('table5_groupX', 'id', 'hash', shard_count := 4, colocate_with := 'none');
CREATE TABLE table6_append ( id int );
SELECT master_create_distributed_table('table6_append', 'id', 'append');
SELECT create_distributed_table('table6_append', 'id', 'append');
SELECT master_create_empty_shard('table6_append');
SELECT master_create_empty_shard('table6_append');
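-- Why the converted calls above pass colocate_with := 'none' (a hedged note):
-- the deprecated API did not assign tables to colocation groups, whereas
-- create_distributed_table defaults to colocate_with := 'default' and would
-- group hash tables that share shard count and partition column type; 'none'
-- keeps each table in its own group, mimicking the old behavior:
SELECT create_distributed_table('table1_group1', 'id', 'hash',
                                shard_count := 4, colocate_with := 'none');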
@ -156,9 +151,13 @@ SELECT find_shard_interval_index(1300016);
-- check external colocation API
SELECT count(*) FROM pg_dist_partition WHERE colocationid IN (4, 5);
DROP TABLE table1_group1, table2_group1, table3_group2, table4_group2, table5_groupX, table6_append;
DELETE FROM pg_dist_colocation WHERE colocationid IN (4, 5);
SELECT 1 FROM run_command_on_workers('DELETE FROM pg_dist_colocation WHERE colocationid IN (4, 5)');
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4;
SET citus.shard_count = 2;
CREATE TABLE table1_groupA ( id int );
@ -218,7 +217,7 @@ $$);
SELECT logicalrelid, colocationid FROM pg_dist_partition
WHERE colocationid >= 1 AND colocationid < 1000
ORDER BY logicalrelid;
ORDER BY logicalrelid::text;
-- check effects of dropping tables
DROP TABLE table1_groupA;
@ -432,11 +431,6 @@ SELECT update_distributed_table_colocation('table1_group_none', colocate_with =>
SELECT update_distributed_table_colocation('table1_group_none', colocate_with => 'table3_groupE');
-- activate nodes to get rid of inconsistencies in pg_dist tables
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table1_group1'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table2_group1'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table3_group2'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table4_group2'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table5_groupX'::regclass::oid, 0);
SELECT 1 FROM citus_activate_node('localhost', :worker_1_port);
SELECT 1 FROM citus_activate_node('localhost', :worker_2_port);

View File

@ -182,7 +182,7 @@ CREATE TABLE customer_copy_range (
c_comment varchar(117),
primary key (c_custkey));
SELECT master_create_distributed_table('customer_copy_range', 'c_custkey', 'range');
SELECT create_distributed_table('customer_copy_range', 'c_custkey', 'range');
-- Test COPY into empty range-partitioned table
\set client_side_copy_command '\\copy customer_copy_range FROM ' :'customer1datafile' ' WITH (DELIMITER '''|''');'
@ -338,9 +338,8 @@ CREATE TABLE "customer_with_special_\\_character"(
c_custkey integer,
c_name varchar(25) not null);
SELECT master_create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash');
SELECT master_create_worker_shards('"customer_with_special_\\_character"', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash', shard_count := 4);
COPY "customer_with_special_\\_character" (c_custkey, c_name) FROM STDIN
WITH (FORMAT 'csv');
@ -356,9 +355,8 @@ CREATE TABLE "1_customer"(
c_custkey integer,
c_name varchar(25) not null);
SELECT master_create_distributed_table('"1_customer"', 'c_custkey', 'hash');
SELECT master_create_worker_shards('"1_customer"', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('"1_customer"', 'c_custkey', 'hash', shard_count := 4);
COPY "1_customer" (c_custkey, c_name) FROM STDIN
WITH (FORMAT 'csv');
@ -386,8 +384,8 @@ CREATE TABLE packed_numbers_hash (
packed_numbers number_pack[]
);
SELECT master_create_distributed_table('packed_numbers_hash', 'id', 'hash');
SELECT master_create_worker_shards('packed_numbers_hash', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('packed_numbers_hash', 'id', 'hash', shard_count := 4);
COPY (SELECT 1, ARRAY[ROW(42, 42), ROW(42, 42)]) TO :'temp_dir''copy_test_array_of_composite';
COPY packed_numbers_hash FROM :'temp_dir''copy_test_array_of_composite';
@ -401,8 +399,8 @@ CREATE TABLE super_packed_numbers_hash (
super_packed_number super_number_pack
);
SELECT master_create_distributed_table('super_packed_numbers_hash', 'id', 'hash');
SELECT master_create_worker_shards('super_packed_numbers_hash', 4, 1);
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('super_packed_numbers_hash', 'id', 'hash', shard_count := 4);
COPY (SELECT 1, ROW(ROW(42, 42), ROW(42, 42))) TO :'temp_dir''copy_test_composite_of_composite';
COPY super_packed_numbers_hash FROM :'temp_dir''copy_test_composite_of_composite';
@ -514,16 +512,6 @@ SELECT shardid, nodename, nodeport
WHERE logicalrelid = 'numbers_append'::regclass order by placementid;
-- add the node back
-- before adding the node, add pg_dist_object entry for tables created with
-- master_create_distributed_table as we don't have the entry for them.
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'objects'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'customer_with_special_\\_character'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, '1_customer'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'packed_numbers_hash'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'super_packed_numbers_hash'::regclass::oid, 0);
INSERT INTO pg_catalog.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0);
SET client_min_messages TO ERROR;
SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
RESET client_min_messages;
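-- A hedged explanation of the deleted INSERTs above: create_distributed_table
-- registers each table in pg_dist_object during distribution, so the manual
-- catalog entries that compensated for master_create_distributed_table are no
-- longer needed. Registration can be spot-checked per table:
SELECT count(*) FROM pg_catalog.pg_dist_object
WHERE classid = 'pg_class'::regclass AND objid = 'objects'::regclass;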

View File

@ -68,22 +68,19 @@ SELECT create_distributed_table('table_to_distribute', 'json_data', 'hash');
-- use a partition column of type lacking the required support function (hash)
SELECT create_distributed_table('table_to_distribute', 'test_type_data', 'hash');
-- distribute table and inspect side effects
SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
SELECT partmethod, partkey FROM pg_dist_partition
WHERE logicalrelid = 'table_to_distribute'::regclass;
-- use a bad shard count
SELECT master_create_worker_shards('table_to_distribute', 0, 1);
SELECT create_distributed_table('table_to_distribute', 'name', 'hash', shard_count := 0);
-- use a bad replication factor
SELECT master_create_worker_shards('table_to_distribute', 16, 0);
SET citus.shard_replication_factor TO 0;
-- use a replication factor higher than shard count
SELECT master_create_worker_shards('table_to_distribute', 16, 3);
SET citus.shard_replication_factor TO 3;
SELECT create_distributed_table('table_to_distribute', 'name', 'hash');
RESET citus.shard_replication_factor;
-- finally, create shards and inspect metadata
SELECT master_create_worker_shards('table_to_distribute', 16, 1);
SELECT create_distributed_table('table_to_distribute', 'name', 'hash', shard_count := 16);
SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 'table_to_distribute'::regclass
@ -98,9 +95,6 @@ SELECT count(*) AS shard_count,
SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r';
-- try to create them again
SELECT master_create_worker_shards('table_to_distribute', 16, 1);
-- test list sorting
SELECT sort_names('sumedh', 'jason', 'ozgun');
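-- How the removed argument-based checks map onto the supported API (a sketch
-- assembled from the hunks above; the error wording differs from the old UDFs):
SELECT create_distributed_table('table_to_distribute', 'name', 'hash',
                                shard_count := 0);  -- bad shard count
SET citus.shard_replication_factor TO 0;  -- bad replication factor
SET citus.shard_replication_factor TO 3;  -- replication factor above worker count
SELECT create_distributed_table('table_to_distribute', 'name', 'hash');
RESET citus.shard_replication_factor;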

View File

@ -9,27 +9,6 @@
SET citus.next_shard_id TO 360000;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
CREATE TABLE lineitem (
l_orderkey bigint not null,
l_partkey integer not null,
@ -174,9 +153,6 @@ INSERT INTO data_load_test VALUES (243, 'world');
SELECT create_distributed_table('data_load_test', 'col1', 'append');
SELECT create_distributed_table('data_load_test', 'col1', 'range');
-- table must be empty when using master_create_distributed_table (no shards created)
SELECT master_create_distributed_table('data_load_test', 'col1', 'hash');
-- create_distributed_table creates shards and copies data into the distributed table
SELECT create_distributed_table('data_load_test', 'col1');
SELECT * FROM data_load_test ORDER BY col1;
@ -193,11 +169,6 @@ SELECT create_distributed_table('no_shard_test', 'col1', 'range');
SELECT * FROM no_shard_test WHERE col1 > 1;
DROP TABLE no_shard_test;
CREATE TABLE no_shard_test (col1 int, col2 text);
SELECT master_create_distributed_table('no_shard_test', 'col1', 'hash');
SELECT * FROM no_shard_test WHERE col1 > 1;
DROP TABLE no_shard_test;
-- ensure writes in the same transaction as create_distributed_table are visible
BEGIN;
CREATE TABLE data_load_test (col1 int, col2 text, col3 serial);

View File

@ -14,22 +14,6 @@ SELECT create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
-- test that deprecated api creates distributed tables with coordinator replication
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
DROP TABLE repmodel_test;
RESET citus.shard_replication_factor;
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360025;
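-- Context for the deleted block (hedged): it asserted that the deprecated API
-- yielded coordinator-managed replication (repmodel 'c') regardless of
-- distribution method, while the supported path records streaming replication
-- ('s') at replication factor 1. The same catalog probe still applies:
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'repmodel_test'::regclass;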

View File

@ -84,7 +84,7 @@ CREATE TABLE events_range (
id bigint,
name text
);
SELECT master_create_distributed_table('events_range', 'name', 'range');
SELECT create_distributed_table('events_range', 'name', 'range');
-- create empty shard
SELECT master_create_empty_shard('events_range');

View File

@ -15,7 +15,6 @@ SELECT create_distributed_table('testtableddl', 'distributecol', 'append');
-- change this test every time the previous tests change the set of tables they leave
-- around.
SET client_min_messages TO 'WARNING';
DROP FUNCTION pg_catalog.master_create_worker_shards(text, integer, integer);
DROP EXTENSION citus CASCADE;
RESET client_min_messages;
@ -130,12 +129,6 @@ ROLLBACK TO SAVEPOINT s3;
ROLLBACK;
CREATE EXTENSION citus;
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-- re-add the nodes to the cluster
SELECT 1 FROM master_add_node('localhost', :worker_1_port);
SELECT 1 FROM master_add_node('localhost', :worker_2_port);
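-- The premise of the deleted lines, stated as a check (illustrative): the
-- shard-creation UDF was dropped back in Citus 10, so after CREATE EXTENSION
-- citus there is nothing to DROP or re-CREATE around the upgrade dance.
SELECT count(*) FROM pg_proc WHERE proname = 'master_create_worker_shards';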

View File

@ -49,20 +49,6 @@ ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword';
-- pg_dist_node entries, pg_dist_object entries and roles.
SELECT unnest(activate_node_snapshot()) order by 1;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
RESET citus.enable_metadata_sync;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- Create a test table with constraints and SERIAL and default from user defined sequence
CREATE SEQUENCE user_defined_seq;
CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL, col_4 BIGINT DEFAULT nextval('user_defined_seq'));
@ -90,7 +76,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
-- Show that append distributed tables are not included in the activate node snapshot
CREATE TABLE non_mx_test_table (col_1 int, col_2 text);
SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append');
SELECT create_distributed_table('non_mx_test_table', 'col_1', 'append');
UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass;
SELECT unnest(activate_node_snapshot()) order by 1;
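-- A hedged restatement of what the snapshot output demonstrates: even after
-- the manual repmodel = 's' update, the append-distributed table stays out of
-- the activate-node snapshot, which can be narrowed to a single count:
SELECT count(*)
FROM (SELECT unnest(activate_node_snapshot()) AS ddl) snapshot
WHERE ddl LIKE '%non_mx_test_table%';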

View File

@ -396,8 +396,8 @@ CREATE TABLE objects (
name text NOT NULL
);
SELECT master_create_distributed_table('objects', 'id', 'hash');
SELECT master_create_worker_shards('objects', 1, 2);
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('objects', 'id', 'hash', shard_count := 1);
-- test primary key violations
BEGIN;
@ -615,7 +615,7 @@ ORDER BY s.logicalrelid, sp.shardstate;
-- some append-partitioned tests for good measure
CREATE TABLE append_researchers ( LIKE researchers );
SELECT master_create_distributed_table('append_researchers', 'id', 'append');
SELECT create_distributed_table('append_researchers', 'id', 'append');
SET citus.shard_replication_factor TO 1;

View File

@ -21,7 +21,7 @@ RESET ROLE;
-- ensure GRANT/REVOKE's do something sane for creating shards of
CREATE TABLE checkperm(key int);
SELECT master_create_distributed_table('checkperm', 'key', 'append');
SELECT create_distributed_table('checkperm', 'key', 'append');
SELECT * FROM master_get_table_ddl_events('checkperm');
REVOKE ALL ON checkperm FROM PUBLIC;
@ -40,14 +40,14 @@ SELECT * FROM master_get_table_ddl_events('checkperm');
-- create table as superuser/postgres
CREATE TABLE trivial_postgres (id int);
SELECT master_create_distributed_table('trivial_postgres', 'id', 'append');
SELECT create_distributed_table('trivial_postgres', 'id', 'append');
GRANT ALL ON trivial_postgres TO full_access;
GRANT CREATE ON SCHEMA public TO full_access;
SET ROLE full_access;
CREATE TABLE trivial_full_access (id int);
SELECT master_create_distributed_table('trivial_full_access', 'id', 'append');
SELECT create_distributed_table('trivial_full_access', 'id', 'append');
RESET ROLE;
SELECT relname, rolname, relacl FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) WHERE relname LIKE 'trivial%' ORDER BY relname;

View File

@ -6,33 +6,11 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000;
SET citus.shard_count TO 2;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
-- Verify that a table name > 56 characters gets hashed properly.
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 (
col1 integer not null,
col2 integer not null);
SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash');
SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2');
SELECT create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1');
\c - - :public_worker_1_host :worker_1_port
\dt too_long_*
@ -206,8 +184,7 @@ CREATE TABLE sneaky_name_lengths (
\di public.sneaky_name_lengths*
SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass ORDER BY 1 DESC, 2 DESC;
SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2');
SELECT create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash');
\c - - :public_worker_1_host :worker_1_port
SELECT c1.relname AS sneaky_index_name,

View File

@ -42,14 +42,10 @@ CREATE TABLE authors_reference ( name varchar(20), id bigint );
-- this table is used in router executor tests
CREATE TABLE articles_single_shard_hash (LIKE articles_hash);
SELECT master_create_distributed_table('articles_hash', 'author_id', 'hash');
SELECT master_create_distributed_table('articles_single_shard_hash', 'author_id', 'hash');
SET citus.shard_replication_factor TO 1;
-- test when a table is distributed but no shards created yet
SELECT count(*) from articles_hash;
SELECT master_create_worker_shards('articles_hash', 2, 1);
SELECT master_create_worker_shards('articles_single_shard_hash', 1, 1);
SELECT create_distributed_table('articles_hash', 'author_id', 'hash', shard_count := 2);
SELECT create_distributed_table('articles_single_shard_hash', 'author_id', 'hash', shard_count := 1);
SELECT create_reference_table('authors_reference');
@ -212,8 +208,7 @@ SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
-- recursive CTEs are supported when filtered on partition column
CREATE TABLE company_employees (company_id int, employee_id int, manager_id int);
SELECT master_create_distributed_table('company_employees', 'company_id', 'hash');
SELECT master_create_worker_shards('company_employees', 4, 1);
SELECT create_distributed_table('company_employees', 'company_id', 'hash', shard_count := 4);
INSERT INTO company_employees values(1, 1, 0);
INSERT INTO company_employees values(1, 2, 1);
@ -840,8 +835,8 @@ SELECT author_id FROM articles_hash
-- just 4 shards to be created for each table to make sure
-- they are 'co-located' pairwise
SET citus.shard_replication_factor TO 1;
SELECT master_create_distributed_table('authors_range', 'id', 'range');
SELECT master_create_distributed_table('articles_range', 'author_id', 'range');
SELECT create_distributed_table('authors_range', 'id', 'range');
SELECT create_distributed_table('articles_range', 'author_id', 'range');
SELECT master_create_empty_shard('authors_range') as shard_id \gset
UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue=10 WHERE shardid = :shard_id;
@ -927,7 +922,7 @@ SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.
-- the following is a bug: the function should have been
-- evaluated on the master before going to the worker
-- we need to use an append-distributed table here
SELECT master_create_distributed_table('articles_append', 'author_id', 'append');
SELECT create_distributed_table('articles_append', 'author_id', 'append');
SET citus.shard_replication_factor TO 1;
SELECT master_create_empty_shard('articles_append') AS shard_id \gset
UPDATE pg_dist_shard SET shardmaxvalue = 100, shardminvalue=1 WHERE shardid = :shard_id;
@ -970,7 +965,7 @@ SELECT raise_failed_execution_router($$
LIMIT 1;
$$);
-- same query on router planner with WHERE false, but evaluation left to the worker
-- hash-distributed tables can be evaluated on workers since they are synced
SELECT raise_failed_execution_router($$
SELECT author_id FROM articles_single_shard_hash
WHERE
@ -1183,8 +1178,7 @@ SET client_min_messages to 'NOTICE';
-- test that a connection failure marks placements invalid
SET citus.shard_replication_factor TO 2;
CREATE TABLE failure_test (a int, b int);
SELECT master_create_distributed_table('failure_test', 'a', 'hash');
SELECT master_create_worker_shards('failure_test', 2);
SELECT create_distributed_table('failure_test', 'a', 'hash');
SET citus.enable_ddl_propagation TO off;
CREATE USER router_user;

View File

@ -121,12 +121,9 @@ id_title AS (SELECT id, title from articles_hash WHERE author_id = 2)
SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id;
CREATE TABLE company_employees (company_id int, employee_id int, manager_id int);
SELECT master_create_distributed_table('company_employees', 'company_id', 'hash');
-- do not print notices from workers since the order is not deterministic
SET client_min_messages TO DEFAULT;
SELECT master_create_worker_shards('company_employees', 4, 1);
SET client_min_messages TO 'DEBUG2';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('company_employees', 'company_id', 'hash');
INSERT INTO company_employees values(1, 1, 0);
INSERT INTO company_employees values(1, 2, 1);

View File

@ -123,7 +123,6 @@ copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :
5|ETHIOPIA|0|ven packages wake quickly. regu
\.
-- create shard with master_create_worker_shards
CREATE TABLE test_schema_support.nation_hash(
n_nationkey integer not null,
n_name char(25) not null,

View File

@ -24,11 +24,10 @@ CREATE TABLE authors ( name text, id bigint );
-- this table is used in router executor tests
CREATE TABLE articles_single_shard (LIKE articles);
SELECT master_create_distributed_table('articles', 'author_id', 'hash');
SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash');
SET citus.shard_replication_factor TO 1;
SELECT master_create_worker_shards('articles', 2, 1);
SELECT master_create_worker_shards('articles_single_shard', 1, 1);
SELECT create_distributed_table('articles', 'author_id', 'hash', shard_count := 2);
SELECT create_distributed_table('articles_single_shard', 'author_id', 'hash', shard_count := 1);
-- create a bunch of test data
INSERT INTO articles VALUES ( 1, 1, 'arsenous', 9572);

View File

@ -50,32 +50,8 @@ SELECT * FROM mx_table ORDER BY col_1;
-- Try commands from metadata worker
\c - - - :worker_1_port
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
CREATE TABLE mx_table_worker(col_1 text);
-- master_create_distributed_table
SELECT master_create_distributed_table('mx_table_worker', 'col_1', 'hash');
-- create_distributed_table
SELECT create_distributed_table('mx_table_worker', 'col_1');
@ -85,18 +61,6 @@ SELECT create_reference_table('mx_table_worker');
SELECT count(*) FROM pg_dist_partition WHERE logicalrelid='mx_table_worker'::regclass;
DROP TABLE mx_table_worker;
-- master_create_worker_shards
CREATE TEMP TABLE pg_dist_shard_temp AS
SELECT * FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass;
DELETE FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass;
SELECT master_create_worker_shards('mx_table', 5, 1);
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass;
INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp;
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass;
\c - - - :master_port
DROP TABLE mx_ref_table;
CREATE UNIQUE INDEX mx_test_uniq_index ON mx_table(col_1);

View File

@ -127,8 +127,8 @@ SELECT create_distributed_table('dustbunnies', 'id', 'hash');
\.
CREATE TABLE second_dustbunnies(id integer, name text, age integer);
SELECT master_create_distributed_table('second_dustbunnies', 'id', 'hash');
SELECT master_create_worker_shards('second_dustbunnies', 1, 2);
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('second_dustbunnies', 'id', 'hash', shard_count := 1);
-- run VACUUM and ANALYZE against the table on the master
\c - - :master_host :master_port

View File

@ -144,27 +144,6 @@ RETURNS boolean
AS 'citus'
LANGUAGE C STRICT VOLATILE;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
SET citus.next_shard_id TO 123000;
SELECT worker_node_responsive(node_name, node_port::int)
@ -1252,7 +1231,7 @@ SELECT 1 FROM master_remove_node('localhost', :master_port);
SELECT public.wait_until_metadata_sync(30000);
CREATE TABLE rebalance_test_table(int_column int);
SELECT master_create_distributed_table('rebalance_test_table', 'int_column', 'append');
SELECT create_distributed_table('rebalance_test_table', 'int_column', 'append');
CALL create_unbalanced_shards('rebalance_test_table');

View File

@ -33,25 +33,6 @@ INSERT INTO r SELECT * FROM generate_series(1, 5);
CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCADE);
SELECT create_distributed_table('tr', 'pk');
INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c;
-- this function is dropped in Citus10, added here for tests
SET citus.enable_metadata_sync TO OFF;
CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
RETURNS void
LANGUAGE C STRICT
AS 'citus', $$master_create_distributed_table$$;
COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass,
distribution_column text,
distribution_method citus.distribution_type)
IS 'define the table distribution functions';
-- this function is dropped in Citus10, added here for tests
CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name text, shard_count integer,
replication_factor integer DEFAULT 2)
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
RESET citus.enable_metadata_sync;
CREATE TABLE t_range(id int, value_1 int);
SELECT create_distributed_table('t_range', 'id', 'range');
SELECT master_create_empty_shard('t_range') as shardid1 \gset