mirror of https://github.com/citusdata/citus.git

Merge pull request #1137 from citusdata/make_rep_model_explicit

Add replication_model GUC (cr: @anarazel)

commit 849f70a409
@@ -60,11 +60,14 @@
 #include "utils/inval.h"


+/* Replication model to use when creating distributed tables */
+int ReplicationModel = REPLICATION_MODEL_COORDINATOR;
+
+
 /* local function forward declarations */
 static void CreateReferenceTable(Oid relationId);
 static void ConvertToDistributedTable(Oid relationId, char *distributionColumnName,
-                                      char distributionMethod, uint32 colocationId,
-                                      char replicationModel);
+                                      char distributionMethod, uint32 colocationId);
 static char LookupDistributionMethod(Oid distributionMethodOid);
 static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId,
                                     int16 supportFunctionNumber);

@@ -106,8 +109,7 @@ master_create_distributed_table(PG_FUNCTION_ARGS)
    EnsureSchemaNode();

    ConvertToDistributedTable(distributedRelationId, distributionColumnName,
-                             distributionMethod, INVALID_COLOCATION_ID,
-                             REPLICATION_MODEL_COORDINATOR);
+                             distributionMethod, INVALID_COLOCATION_ID);

    PG_RETURN_VOID();
 }

@@ -164,8 +166,7 @@ create_distributed_table(PG_FUNCTION_ARGS)
    if (distributionMethod != DISTRIBUTE_BY_HASH)
    {
        ConvertToDistributedTable(relationId, distributionColumnName,
-                                 distributionMethod, INVALID_COLOCATION_ID,
-                                 REPLICATION_MODEL_COORDINATOR);
+                                 distributionMethod, INVALID_COLOCATION_ID);

        PG_RETURN_VOID();
    }

@@ -228,7 +229,7 @@ CreateReferenceTable(Oid relationId)

    /* first, convert the relation into distributed relation */
    ConvertToDistributedTable(relationId, distributionColumnName,
-                             DISTRIBUTE_BY_NONE, colocationId, REPLICATION_MODEL_2PC);
+                             DISTRIBUTE_BY_NONE, colocationId);

    /* now, create the single shard replicated to all nodes */
    CreateReferenceTableShard(relationId);
@@ -250,14 +251,27 @@ CreateReferenceTable(Oid relationId)
  */
 static void
 ConvertToDistributedTable(Oid relationId, char *distributionColumnName,
-                          char distributionMethod, uint32 colocationId,
-                          char replicationModel)
+                          char distributionMethod, uint32 colocationId)
 {
    Relation relation = NULL;
    TupleDesc relationDesc = NULL;
    char *relationName = NULL;
    char relationKind = 0;
    Var *distributionColumn = NULL;
+   char replicationModel = REPLICATION_MODEL_INVALID;
+
+   /* check global replication settings before continuing */
+   EnsureReplicationSettings(InvalidOid);
+
+   /* distribute by none tables use 2PC replication; otherwise use GUC setting */
+   if (distributionMethod == DISTRIBUTE_BY_NONE)
+   {
+       replicationModel = REPLICATION_MODEL_2PC;
+   }
+   else
+   {
+       replicationModel = ReplicationModel;
+   }

    /*
     * Lock target relation with an exclusive lock - there's no way to make
@@ -891,21 +905,10 @@ CreateHashDistributedTable(Oid relationId, char *distributionColumnName,
    uint32 colocationId = INVALID_COLOCATION_ID;
    Oid sourceRelationId = InvalidOid;
    Oid distributionColumnType = InvalidOid;
-   char replicationModel = 0;

    /* get an access lock on the relation to prevent DROP TABLE and ALTER TABLE */
    distributedRelation = relation_open(relationId, AccessShareLock);

-   /* all hash-distributed tables with repfactor=1 are treated as MX tables */
-   if (replicationFactor == 1)
-   {
-       replicationModel = REPLICATION_MODEL_STREAMING;
-   }
-   else
-   {
-       replicationModel = REPLICATION_MODEL_COORDINATOR;
-   }
-
    /*
     * Get an exclusive lock on the colocation system catalog. Therefore, we
     * can be sure that there will no modifications on the colocation table

@@ -946,7 +949,7 @@ CreateHashDistributedTable(Oid relationId, char *distributionColumnName,

    /* create distributed table metadata */
    ConvertToDistributedTable(relationId, distributionColumnName, DISTRIBUTE_BY_HASH,
-                             colocationId, replicationModel);
+                             colocationId);

    /* create shards */
    if (sourceRelationId != InvalidOid)
@@ -979,3 +982,34 @@ ColumnType(Oid relationId, char *columnName)

    return columnType;
 }
+
+
+/*
+ * Check that the current replication factor setting is compatible with the
+ * replication model of relationId, if valid. If InvalidOid, check the
+ * global replication model setting instead. Errors out if an invalid state
+ * is detected.
+ */
+void
+EnsureReplicationSettings(Oid relationId)
+{
+   char replicationModel = (char) ReplicationModel;
+   char *msgSuffix = "the streaming replication model";
+   char *extraHint = " or setting \"citus.replication_model\" to \"statement\"";
+
+   if (relationId != InvalidOid)
+   {
+       replicationModel = TableReplicationModel(relationId);
+       msgSuffix = "tables which use the streaming replication model";
+       extraHint = "";
+   }
+
+   if (replicationModel == REPLICATION_MODEL_STREAMING && ShardReplicationFactor != 1)
+   {
+       ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                       errmsg("replication factors above one are incompatible with %s",
+                              msgSuffix),
+                       errhint("Try again after reducing \"citus.shard_replication_"
+                               "factor\" to one%s.", extraHint)));
+   }
+}
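A minimal sketch of how the new check surfaces in a session (the table name events is hypothetical; the ERROR and HINT text matches the messages added above, and the same interaction is exercised by the regression tests later in this diff):

SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 2;
CREATE TABLE events (id int);
SELECT create_distributed_table('events', 'id');
ERROR:  replication factors above one are incompatible with the streaming replication model
HINT:  Try again after reducing "citus.shard_replication_factor" to one or setting "citus.replication_model" to "statement".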
@@ -27,6 +27,7 @@
 #include "catalog/namespace.h"
 #include "catalog/pg_class.h"
 #include "distributed/listutils.h"
+#include "distributed/master_metadata_utility.h"
 #include "distributed/master_protocol.h"
 #include "distributed/metadata_cache.h"
 #include "distributed/multi_join_order.h"

@@ -127,6 +127,8 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
                           "on reference tables")));
    }

+   EnsureReplicationSettings(relationId);
+
    /* generate new and unique shardId from sequence */
    shardId = GetNextShardId();
@@ -1586,3 +1586,16 @@ PartitionMethod(Oid relationId)

    return partitionMethod;
 }
+
+
+/* Returns the replication model for the given relation. */
+char
+TableReplicationModel(Oid relationId)
+{
+   /* errors out if not a distributed table */
+   DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId);
+
+   char replicationModel = partitionEntry->replicationModel;
+
+   return replicationModel;
+}
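TableReplicationModel() simply reads the repmodel field cached for the relation; the same value can be inspected directly in the pg_dist_partition catalog, as the regression tests below do (assuming an existing distributed table named mx_table_test, and the usual single-character encoding where streaming tables show 's'):

SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_table_test'::regclass;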
@@ -21,6 +21,7 @@
 #include "distributed/citus_nodefuncs.h"
 #include "distributed/connection_management.h"
 #include "distributed/connection_management.h"
+#include "distributed/master_metadata_utility.h"
 #include "distributed/master_protocol.h"
 #include "distributed/multi_copy.h"
 #include "distributed/multi_executor.h"

@@ -32,6 +33,7 @@
 #include "distributed/multi_router_planner.h"
 #include "distributed/multi_server_executor.h"
 #include "distributed/multi_utility.h"
+#include "distributed/pg_dist_partition.h"
 #include "distributed/placement_connection.h"
 #include "distributed/remote_commands.h"
 #include "distributed/task_tracker.h"

@@ -63,6 +65,12 @@ static const struct config_enum_entry task_assignment_policy_options[] = {
    { NULL, 0, false }
 };

+static const struct config_enum_entry replication_model_options[] = {
+   { "statement", REPLICATION_MODEL_COORDINATOR, false },
+   { "streaming", REPLICATION_MODEL_STREAMING, false },
+   { NULL, 0, false }
+};
+
 static const struct config_enum_entry task_executor_type_options[] = {
    { "real-time", MULTI_EXECUTOR_REAL_TIME, false },
    { "task-tracker", MULTI_EXECUTOR_TASK_TRACKER, false },
@@ -571,6 +579,20 @@ RegisterCitusConfigVariables(void)
        0,
        NULL, NULL, NULL);

+   DefineCustomEnumVariable(
+       "citus.replication_model",
+       gettext_noop("Sets the replication model to be used for distributed tables."),
+       gettext_noop("Depending upon the execution environment, statement- or streaming-"
+                    "based replication modes may be employed. Though most Citus "
+                    "deployments will simply use statement replication, hosted and "
+                    "MX-style deployments should set this parameter to 'streaming'."),
+       &ReplicationModel,
+       REPLICATION_MODEL_COORDINATOR,
+       replication_model_options,
+       PGC_SUSET,
+       GUC_SUPERUSER_ONLY,
+       NULL, NULL, NULL);
+
    DefineCustomEnumVariable(
        "citus.task_executor_type",
        gettext_noop("Sets the executor type to be used for distributed queries."),
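Because the GUC is registered with PGC_SUSET and GUC_SUPERUSER_ONLY, only a superuser can switch models. A typical session setup for creating an MX table, mirroring the updated test files below, would be:

SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE mx_table_test (col1 int, col2 text);
SELECT create_distributed_table('mx_table_test', 'col1');
RESET citus.replication_model;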
@@ -59,6 +59,9 @@ typedef struct ShardPlacement
 } ShardPlacement;


+/* Config variable managed via guc.c */
+extern int ReplicationModel;
+
 /* Function declarations to read shard and shard placement data */
 extern uint32 TableShardReplicationFactor(Oid relationId);
 extern List * LoadShardIntervalList(Oid relationId);

@@ -97,6 +100,7 @@ extern char * TableOwner(Oid relationId);
 extern void EnsureTablePermissions(Oid relationId, AclMode mode);
 extern void EnsureTableOwner(Oid relationId);
 extern void EnsureSuperUser(void);
+extern void EnsureReplicationSettings(Oid relationId);
 extern bool TableReferenced(Oid relationId);
 extern char * ConstructQualifiedShardName(ShardInterval *shardInterval);
 extern Datum StringToDatum(char *inputString, Oid dataType);
@@ -92,6 +92,7 @@ extern Var * RightColumn(OpExpr *joinClause);
 extern Var * PartitionColumn(Oid relationId, uint32 rangeTableId);
 extern Var * PartitionKey(Oid relationId);
 extern char PartitionMethod(Oid relationId);
+extern char TableReplicationModel(Oid relationId);


 #endif /* MULTI_JOIN_ORDER_H */
@@ -137,10 +137,16 @@ SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append');

 (1 row)

--- Show that when a hash distributed table with replication factor=1 is created, it
--- automatically marked as streaming replicated
-SET citus.shard_replication_factor TO 1;
 CREATE TABLE mx_table_test (col1 int, col2 text);
+-- Since we're superuser, we can set the replication model to 'streaming' to
+-- create a one-off MX table... but if we forget to set the replication factor to one,
+-- we should see an error reminding us to fix that
+SET citus.replication_model TO 'streaming';
+SELECT create_distributed_table('mx_table_test', 'col1');
+ERROR:  replication factors above one are incompatible with the streaming replication model
+HINT:  Try again after reducing "citus.shard_replication_factor" to one or setting "citus.replication_model" to "statement".
+-- ok, so now actually create the one-off MX table
+SET citus.shard_replication_factor TO 1;
 SELECT create_distributed_table('mx_table_test', 'col1');
  create_distributed_table
 --------------------------

@@ -154,6 +160,7 @@ SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass;
 (1 row)

 DROP TABLE mx_table_test;
+RESET citus.replication_model;
 -- Show that it is not possible to create an mx table with the old
 -- master_create_distributed_table function
 CREATE TABLE mx_table_test (col1 int, col2 text);
@@ -267,7 +267,10 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;

 -- Make sure that start_metadata_sync_to_node considers foreign key constraints
 \c - - - :master_port
+-- Since we're superuser, we can set the replication model to 'streaming' to
+-- create some MX tables
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE SCHEMA mx_testing_schema_2;
 CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3));
 CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,

@@ -307,6 +310,7 @@ Foreign-key constraints:
 DROP TABLE mx_testing_schema_2.fk_test_2;
 DROP TABLE mx_testing_schema.fk_test_1;
 RESET citus.shard_replication_factor;
+RESET citus.replication_model;
 -- Check that repeated calls to start_metadata_sync_to_node has no side effects
 \c - - - :master_port
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

@@ -399,6 +403,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
 -- Check that the distributed table can be queried from the worker
 \c - - - :master_port
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
  start_metadata_sync_to_node
 -----------------------------

@@ -489,6 +494,7 @@ CREATE SCHEMA mx_test_schema_1;
 CREATE SCHEMA mx_test_schema_2;
 -- Create MX tables
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text);
 CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1);
 CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text);

@@ -754,6 +760,7 @@ SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
 SET citus.shard_count TO 7;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE mx_colocation_test_1 (a int);
 SELECT create_distributed_table('mx_colocation_test_1', 'a');
  create_distributed_table

@@ -848,6 +855,7 @@ DROP TABLE mx_colocation_test_2;
 \c - - - :master_port
 SET citus.shard_count TO 7;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE mx_temp_drop_test (a int);
 SELECT create_distributed_table('mx_temp_drop_test', 'a');
  create_distributed_table

@@ -880,6 +888,7 @@ DROP TABLE mx_temp_drop_test;
 \c - - - :master_port
 SET citus.shard_count TO 3;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
  stop_metadata_sync_to_node
 ----------------------------
@@ -1097,7 +1106,8 @@ SELECT master_remove_node('localhost', :worker_2_port);

 (1 row)

-CREATE USER mx_user;
+-- the master user needs superuser permissions to change the replication model
+CREATE USER mx_user WITH SUPERUSER;
 NOTICE:  not propagating CREATE ROLE/USER commands to worker nodes
 HINT:  Connect to worker nodes directly to manually create all necessary users and roles.
 \c - - - :worker_1_port

@@ -1112,6 +1122,7 @@ HINT:  Connect to worker nodes directly to manually create all necessary users and roles.
 -- Create an mx table as a different user
 CREATE TABLE mx_table (a int, b BIGSERIAL);
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SELECT create_distributed_table('mx_table', 'a');
  create_distributed_table
 --------------------------

@@ -1352,6 +1363,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);

 RESET citus.shard_count;
 RESET citus.shard_replication_factor;
+RESET citus.replication_model;
 RESET citus.multi_shard_commit_protocol;
 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id;
 ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
@@ -353,6 +353,7 @@ SELECT create_reference_table('replicate_reference_table_reference_one');

 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE replicate_reference_table_hash(column1 int);
 SELECT create_distributed_table('replicate_reference_table_hash', 'column1');
  create_distributed_table

@@ -605,3 +606,5 @@ DROP SCHEMA replicate_reference_table_schema CASCADE;
 -- reload pg_dist_shard_placement table
 INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement);
 DROP TABLE tmp_shard_placement;
+RESET citus.shard_replication_factor;
+RESET citus.replication_model;
@@ -33,6 +33,9 @@ ROLLBACK;
 DROP TABLE testtableddl;
 -- verify that the table can dropped even if shards exist
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
+-- create table as MX table to do create empty shard test here, too
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
  master_create_distributed_table
 ---------------------------------

@@ -45,7 +48,15 @@ SELECT 1 FROM master_create_empty_shard('testtableddl');
        1
 (1 row)

+-- this'll error out
+SET citus.shard_replication_factor TO 2;
+SELECT 1 FROM master_create_empty_shard('testtableddl');
+ERROR:  replication factors above one are incompatible with tables which use the streaming replication model
+HINT:  Try again after reducing "citus.shard_replication_factor" to one.
+-- now actually drop table and shards
 DROP TABLE testtableddl;
+RESET citus.shard_replication_factor;
+RESET citus.replication_model;
 -- ensure no metadata of distributed tables are remaining
 SELECT * FROM pg_dist_partition;
  logicalrelid | partmethod | partkey | colocationid | repmodel
@@ -12,6 +12,7 @@ SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 150000;
 -- Prepare the environment
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SET citus.shard_count TO 5;
 -- Create test tables
 CREATE TABLE mx_table (col_1 int, col_2 text, col_3 BIGSERIAL);

@@ -403,3 +404,5 @@ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition;

 \c - - - :master_port
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
+RESET citus.shard_replication_factor;
+RESET citus.replication_model;
@@ -801,6 +801,7 @@ DROP TABLE upgrade_reference_table_transaction_commit;
 -- create an mx table
 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE upgrade_reference_table_mx(column1 int);
 SELECT create_distributed_table('upgrade_reference_table_mx', 'column1');
  create_distributed_table

@@ -911,6 +912,7 @@ DROP TABLE upgrade_reference_table_mx;
 -- test valid cases, do it with MX
 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 2;
+RESET citus.replication_model;
 CREATE TABLE upgrade_reference_table_mx(column1 int);
 SELECT create_distributed_table('upgrade_reference_table_mx', 'column1');
  create_distributed_table
@@ -112,15 +112,22 @@ CREATE TABLE supplier_single_shard
 );
 SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append');

--- Show that when a hash distributed table with replication factor=1 is created, it
--- automatically marked as streaming replicated
-SET citus.shard_replication_factor TO 1;
-
 CREATE TABLE mx_table_test (col1 int, col2 text);

+-- Since we're superuser, we can set the replication model to 'streaming' to
+-- create a one-off MX table... but if we forget to set the replication factor to one,
+-- we should see an error reminding us to fix that
+SET citus.replication_model TO 'streaming';
+SELECT create_distributed_table('mx_table_test', 'col1');
+
+-- ok, so now actually create the one-off MX table
+SET citus.shard_replication_factor TO 1;
 SELECT create_distributed_table('mx_table_test', 'col1');
 SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass;
 DROP TABLE mx_table_test;

+RESET citus.replication_model;
+
 -- Show that it is not possible to create an mx table with the old
 -- master_create_distributed_table function
 CREATE TABLE mx_table_test (col1 int, col2 text);
@@ -89,7 +89,11 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass;

 -- Make sure that start_metadata_sync_to_node considers foreign key constraints
 \c - - - :master_port

+-- Since we're superuser, we can set the replication model to 'streaming' to
+-- create some MX tables
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';

 CREATE SCHEMA mx_testing_schema_2;

@@ -111,6 +115,7 @@ DROP TABLE mx_testing_schema_2.fk_test_2;
 DROP TABLE mx_testing_schema.fk_test_1;

 RESET citus.shard_replication_factor;
+RESET citus.replication_model;

 -- Check that repeated calls to start_metadata_sync_to_node has no side effects
 \c - - - :master_port

@@ -136,6 +141,7 @@
 -- Check that the distributed table can be queried from the worker
 \c - - - :master_port
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SELECT start_metadata_sync_to_node('localhost', :worker_1_port);

 CREATE TABLE mx_query_test (a int, b text, c int);

@@ -177,6 +183,7 @@ CREATE SCHEMA mx_test_schema_2;

 -- Create MX tables
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text);
 CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1);

@@ -302,6 +309,7 @@ SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
 SET citus.shard_count TO 7;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';

 CREATE TABLE mx_colocation_test_1 (a int);
 SELECT create_distributed_table('mx_colocation_test_1', 'a');

@@ -370,6 +378,7 @@ DROP TABLE mx_colocation_test_2;
 \c - - - :master_port
 SET citus.shard_count TO 7;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';

 CREATE TABLE mx_temp_drop_test (a int);
 SELECT create_distributed_table('mx_temp_drop_test', 'a');

@@ -387,6 +396,7 @@ DROP TABLE mx_temp_drop_test;
 \c - - - :master_port
 SET citus.shard_count TO 3;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';

 SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
 SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);
@@ -465,7 +475,8 @@ DELETE FROM pg_dist_shard_placement;
 DELETE FROM pg_dist_partition;
 SELECT master_remove_node('localhost', :worker_2_port);

-CREATE USER mx_user;
+-- the master user needs superuser permissions to change the replication model
+CREATE USER mx_user WITH SUPERUSER;
 \c - - - :worker_1_port
 CREATE USER mx_user;
 \c - - - :worker_2_port

@@ -475,6 +486,7 @@ CREATE USER mx_user;
 -- Create an mx table as a different user
 CREATE TABLE mx_table (a int, b BIGSERIAL);
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SELECT create_distributed_table('mx_table', 'a');

 \c - postgres - :master_port

@@ -591,6 +603,7 @@ SELECT stop_metadata_sync_to_node('localhost', :worker_2_port);

 RESET citus.shard_count;
 RESET citus.shard_replication_factor;
+RESET citus.replication_model;
 RESET citus.multi_shard_commit_protocol;

 ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id;
@@ -228,6 +228,7 @@ SELECT create_reference_table('replicate_reference_table_reference_one');

 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE replicate_reference_table_hash(column1 int);
 SELECT create_distributed_table('replicate_reference_table_hash', 'column1');

@@ -395,3 +396,6 @@ DROP SCHEMA replicate_reference_table_schema CASCADE;
 -- reload pg_dist_shard_placement table
 INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement);
 DROP TABLE tmp_shard_placement;
+
+RESET citus.shard_replication_factor;
+RESET citus.replication_model;
@@ -32,10 +32,23 @@ DROP TABLE testtableddl;

 -- verify that the table can dropped even if shards exist
 CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL);
+
+-- create table as MX table to do create empty shard test here, too
+SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append');
 SELECT 1 FROM master_create_empty_shard('testtableddl');
+
+-- this'll error out
+SET citus.shard_replication_factor TO 2;
+SELECT 1 FROM master_create_empty_shard('testtableddl');
+
+-- now actually drop table and shards
 DROP TABLE testtableddl;
+
+RESET citus.shard_replication_factor;
+RESET citus.replication_model;
+
 -- ensure no metadata of distributed tables are remaining
 SELECT * FROM pg_dist_partition;
 SELECT * FROM pg_dist_shard;
@@ -16,6 +16,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 150000;

 -- Prepare the environment
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 SET citus.shard_count TO 5;

 -- Create test tables

@@ -213,3 +214,6 @@ DELETE FROM pg_dist_node;
 SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition;
 \c - - - :master_port
 ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
+
+RESET citus.shard_replication_factor;
+RESET citus.replication_model;
@@ -522,6 +522,7 @@ DROP TABLE upgrade_reference_table_transaction_commit;
 -- create an mx table
 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
+SET citus.replication_model TO 'streaming';
 CREATE TABLE upgrade_reference_table_mx(column1 int);
 SELECT create_distributed_table('upgrade_reference_table_mx', 'column1');

@@ -596,6 +597,7 @@ DROP TABLE upgrade_reference_table_mx;
 -- test valid cases, do it with MX
 SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 2;
+RESET citus.replication_model;
 CREATE TABLE upgrade_reference_table_mx(column1 int);
 SELECT create_distributed_table('upgrade_reference_table_mx', 'column1');
 UPDATE pg_dist_shard_placement SET shardstate = 3