Merge pull request #4891 from citusdata/remove-replmodel-guc

Deprecates the `citus.replication_model` GUC

We used to have two different GUCs that decided the shard replication model:
- `citus.replication_model`: set to either "statement" or "streaming"
- `citus.shard_replication_factor`: prevents us from using streaming
  replication when greater than 1.

This PR deprecates the `citus.replication_model` GUC and decides on the
replication model solely based on the shard replication factor of the
distributed tables that are affected by queries.
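As an illustrative sketch of the new behavior (table names here are hypothetical, not from this PR):

    SET citus.shard_replication_factor TO 1;
    CREATE TABLE events (id bigint, payload text);
    SELECT create_distributed_table('events', 'id');
    -- repmodel becomes 's' (streaming) because the replication factor is 1

    SET citus.shard_replication_factor TO 2;
    CREATE TABLE events_replicated (id bigint, payload text);
    SELECT create_distributed_table('events_replicated', 'id');
    -- repmodel becomes 'c' (statement-based) because the factor is above 1

    SELECT logicalrelid, repmodel
    FROM pg_dist_partition
    WHERE logicalrelid IN ('events'::regclass, 'events_replicated'::regclass);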
Hanefi Onaldi 2021-05-21 16:32:44 +03:00 committed by GitHub
commit 8b8f0161c3
134 changed files with 605 additions and 1435 deletions


@ -179,7 +179,7 @@ remove_local_tables_from_metadata(PG_FUNCTION_ARGS)
* properties:
* - it will have only one shard,
* - its distribution method will be DISTRIBUTE_BY_NONE,
* - its replication model will be ReplicationModel,
* - its replication model will be REPLICATION_MODEL_STREAMING,
* - its replication factor will be set to 1.
* Similar to reference tables, it has only 1 placement. In addition to that, that
* single placement is only allowed to be on the coordinator.
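A citus local table therefore always records the streaming model now. A minimal check, assuming a Citus 10.x cluster where citus_add_local_table_to_metadata is available (the table name is hypothetical):

    CREATE TABLE local_t (a int);
    SELECT citus_add_local_table_to_metadata('local_t');
    SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'local_t'::regclass;
    -- expected: 's'; before this change it was 'c', taken from the
    -- now-deprecated citus.replication_model GUC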
@ -996,9 +996,7 @@ InsertMetadataForCitusLocalTable(Oid citusLocalTableId, uint64 shardId)
Assert(shardId != INVALID_SHARD_ID);
char distributionMethod = DISTRIBUTE_BY_NONE;
char replicationModel = ReplicationModel;
Assert(replicationModel != REPLICATION_MODEL_2PC);
char replicationModel = REPLICATION_MODEL_STREAMING;
uint32 colocationId = INVALID_COLOCATION_ID;
Var *distributionColumn = NULL;


@ -86,13 +86,9 @@
*/
#define LOG_PER_TUPLE_AMOUNT 1000000
/* Replication model to use when creating distributed tables */
int ReplicationModel = REPLICATION_MODEL_COORDINATOR;
/* local function forward declarations */
static char DecideReplicationModel(char distributionMethod, bool viaDeprecatedAPI);
static char DecideReplicationModel(char distributionMethod, char *colocateWithTableName,
bool viaDeprecatedAPI);
static void CreateHashDistributedTableShards(Oid relationId, int shardCount,
Oid colocatedTableId, bool localTableEmpty);
static uint32 ColocationIdForNewTable(Oid relationId, Var *distributionColumn,
@ -442,6 +438,7 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio
EnsureDependenciesExistOnAllNodes(&tableAddress);
char replicationModel = DecideReplicationModel(distributionMethod,
colocateWithTableName,
viaDeprecatedAPI);
/*
@ -631,44 +628,38 @@ DropFKeysRelationInvolvedWithTableType(Oid relationId, int tableTypeFlag)
/*
* DecideReplicationModel function decides which replication model should be
* used depending on given distribution configuration and global ReplicationModel
* variable. If ReplicationModel conflicts with distribution configuration, this
* function errors out.
* used depending on given distribution configuration.
*/
static char
DecideReplicationModel(char distributionMethod, bool viaDeprecatedAPI)
DecideReplicationModel(char distributionMethod, char *colocateWithTableName, bool
viaDeprecatedAPI)
{
if (viaDeprecatedAPI)
{
if (ReplicationModel != REPLICATION_MODEL_COORDINATOR)
{
ereport(NOTICE, (errmsg("using statement-based replication"),
errdetail("The current replication_model setting is "
"'streaming', which is not supported by "
"master_create_distributed_table."),
errhint("Use create_distributed_table to use the streaming "
"replication model.")));
}
return REPLICATION_MODEL_COORDINATOR;
}
else if (distributionMethod == DISTRIBUTE_BY_NONE)
{
return REPLICATION_MODEL_2PC;
}
else if (distributionMethod == DISTRIBUTE_BY_HASH)
else if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) != 0 &&
!IsColocateWithNone(colocateWithTableName))
{
return ReplicationModel;
text *colocateWithTableNameText = cstring_to_text(colocateWithTableName);
Oid colocatedRelationId = ResolveRelationId(colocateWithTableNameText, false);
CitusTableCacheEntry *targetTableEntry = GetCitusTableCacheEntry(
colocatedRelationId);
char replicationModel = targetTableEntry->replicationModel;
return replicationModel;
}
else if (distributionMethod == DISTRIBUTE_BY_HASH &&
!DistributedTableReplicationIsEnabled())
{
return REPLICATION_MODEL_STREAMING;
}
else
{
if (ReplicationModel != REPLICATION_MODEL_COORDINATOR)
{
ereport(NOTICE, (errmsg("using statement-based replication"),
errdetail("Streaming replication is supported only for "
"hash-distributed tables.")));
}
return REPLICATION_MODEL_COORDINATOR;
}
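In SQL terms, the new decision logic sketches roughly as follows (a hedged illustration; 'events' continues the hypothetical example above, and the repmodel letters in pg_dist_partition are 's' = streaming, 'c' = statement/coordinator, 't' = two-phase commit, used by reference tables):

    -- explicit colocation: inherit the replication model of the target table
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE orders (id bigint);
    SELECT create_distributed_table('orders', 'id', colocate_with := 'events');
    -- orders inherits events' repmodel ('s' here)

    -- no explicit colocation, hash-distributed, factor = 1: 's' (streaming)
    -- otherwise (factor > 1, or append/range distribution): 'c' (statement)
    -- Tables created via the deprecated master_create_distributed_table path
    -- and none-distributed tables are checked first and keep their fixed
    -- models, 'c' and 't' respectively.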
@ -863,7 +854,6 @@ EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn,
EnsureTableNotDistributed(relationId);
EnsureLocalTableEmptyIfNecessary(relationId, distributionMethod, viaDeprecatedAPI);
EnsureReplicationSettings(InvalidOid, replicationModel);
EnsureRelationHasNoTriggers(relationId);
/* we assume callers took necessary locks */
@ -1156,36 +1146,6 @@ EnsureTableNotDistributed(Oid relationId)
}
/*
* EnsureReplicationSettings checks whether the current replication factor
* setting is compatible with the replication model. This function errors
* out if caller tries to use streaming replication with more than one
* replication factor.
*/
void
EnsureReplicationSettings(Oid relationId, char replicationModel)
{
char *msgSuffix = "the streaming replication model";
char *extraHint = " or setting \"citus.replication_model\" to \"statement\"";
if (relationId != InvalidOid)
{
msgSuffix = "tables which use the streaming replication model";
extraHint = "";
}
if (replicationModel == REPLICATION_MODEL_STREAMING &&
DistributedTableReplicationIsEnabled())
{
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("replication factors above one are incompatible with %s",
msgSuffix),
errhint("Try again after reducing \"citus.shard_replication_"
"factor\" to one%s.", extraHint)));
}
}
/*
* EnsureRelationHasNoTriggers errors out if the given table has triggers on
* it. See also GetExplicitTriggerIdList function's comment for the triggers this


@ -461,15 +461,6 @@ GetFunctionColocationId(Oid functionOid, char *colocateWithTableName,
EnsureFunctionCanBeColocatedWithTable(functionOid, distributionArgumentOid,
colocatedTableId);
}
else if (ReplicationModel == REPLICATION_MODEL_COORDINATOR)
{
/* streaming replication model is required for metadata syncing */
ereport(ERROR, (errmsg("cannot create a function with a distribution "
"argument when citus.replication_model is "
"'statement'"),
errhint("Set citus.replication_model to 'streaming' "
"before creating distributed tables")));
}
}
else
{
@ -537,7 +528,7 @@ EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid distributionColumnTyp
"with distributed tables that are created using "
"streaming replication model."),
errhint("When distributing tables make sure that "
"citus.replication_model = 'streaming'")));
"citus.shard_replication_factor = 1")));
}
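With the GUC gone, colocating a function only requires that the target table use streaming replication, i.e. that it was created with replication factor 1. A minimal sketch (function and table names are hypothetical):

    SET citus.shard_replication_factor TO 1;
    CREATE TABLE users (user_id int);
    SELECT create_distributed_table('users', 'user_id');
    CREATE FUNCTION touch_user(user_id int) RETURNS int
        LANGUAGE sql AS $$ SELECT $1 $$;
    SELECT create_distributed_function('touch_user(int)', '$1', colocate_with := 'users');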
/*


@ -171,10 +171,6 @@ master_create_empty_shard(PG_FUNCTION_ARGS)
"on local tables")));
}
char replicationModel = TableReplicationModel(relationId);
EnsureReplicationSettings(relationId, replicationModel);
/* generate new and unique shardId from sequence */
uint64 shardId = GetNextShardId();


@ -99,6 +99,9 @@ PG_MODULE_MAGIC;
#define DUMMY_REAL_TIME_EXECUTOR_ENUM_VALUE 9999999
static char *CitusVersion = CITUS_VERSION;
/* deprecated GUC value that should not be used anywhere outside this file */
static int ReplicationModel = REPLICATION_MODEL_STREAMING;
void _PG_init(void);
void _PG_fini(void);
@ -115,6 +118,7 @@ static void RegisterCitusConfigVariables(void);
static bool ErrorIfNotASuitableDeadlockFactor(double *newval, void **extra,
GucSource source);
static bool WarnIfDeprecatedExecutorUsed(int *newval, void **extra, GucSource source);
static bool WarnIfReplicationModelIsSet(int *newval, void **extra, GucSource source);
static bool NoticeIfSubqueryPushdownEnabled(bool *newval, void **extra, GucSource source);
static bool NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source);
static void NodeConninfoGucAssignHook(const char *newval, void *extra);
@ -1487,17 +1491,16 @@ RegisterCitusConfigVariables(void)
DefineCustomEnumVariable(
"citus.replication_model",
gettext_noop("Sets the replication model to be used for distributed tables."),
gettext_noop("Depending upon the execution environment, statement- or streaming-"
"based replication modes may be employed. Though most Citus deploy-"
"ments will simply use statement replication, hosted and MX-style"
"deployments should set this parameter to 'streaming'."),
gettext_noop("Deprecated. Please use citus.shard_replication_factor instead"),
gettext_noop(
"Shard replication model is determined by the shard replication factor."
"'statement' replication is used only when the replication factor is one."),
&ReplicationModel,
REPLICATION_MODEL_COORDINATOR,
REPLICATION_MODEL_STREAMING,
replication_model_options,
PGC_SUSET,
GUC_SUPERUSER_ONLY,
NULL, NULL, NULL);
GUC_NO_SHOW_ALL,
WarnIfReplicationModelIsSet, NULL, NULL);
DefineCustomBoolVariable(
"citus.running_under_isolation_test",
@ -1816,6 +1819,32 @@ NoticeIfSubqueryPushdownEnabled(bool *newval, void **extra, GucSource source)
}
/*
* WarnIfReplicationModelIsSet prints a warning when a user sets
* citus.replication_model.
*/
static bool
WarnIfReplicationModelIsSet(int *newval, void **extra, GucSource source)
{
/* print a warning only when user sets the guc */
if (source == PGC_S_SESSION)
{
ereport(NOTICE, (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
errmsg(
"Setting citus.replication_model has no effect. Please use "
"citus.shard_replication_factor instead."),
errdetail(
"Citus determines the replication model based on the "
"replication factor and the replication models of the colocated "
"shards. If a colocated table is present, the replication model "
"is inherited. Otherwise 'streaming' replication is preferred if "
"supported by the replication factor.")));
}
return true;
}
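In a session, this hook makes the old setting a no-op that only emits the notice above; roughly:

    SET citus.replication_model TO streaming;
    -- NOTICE:  Setting citus.replication_model has no effect. Please use
    -- citus.shard_replication_factor instead.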
/*
* NodeConninfoGucCheckHook ensures conninfo settings are in the expected form
* and that the keywords of all non-null settings are on an allowlist devised to


@ -24,3 +24,22 @@ ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ADD COLUMN improvement_thresho
UPDATE pg_catalog.pg_dist_rebalance_strategy SET improvement_threshold = 0.5 WHERE name = 'by_disk_size';
#include "udfs/get_rebalance_progress/10.1-1.sql"
-- use streaming replication when replication factor = 1
WITH replicated_shards AS (
SELECT shardid
FROM pg_dist_placement
WHERE shardstate = 1 OR shardstate = 3
GROUP BY shardid
HAVING count(*) <> 1 ),
replicated_relations AS (
SELECT DISTINCT logicalrelid
FROM pg_dist_shard
JOIN replicated_shards
USING (shardid)
)
UPDATE pg_dist_partition
SET repmodel = 's'
WHERE repmodel = 'c'
AND partmethod = 'h'
AND logicalrelid NOT IN (SELECT * FROM replicated_relations);
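As an illustrative sanity check (not part of the migration script), the switched tables can be listed after the upgrade:

    SELECT logicalrelid::regclass AS table_name, repmodel
    FROM pg_dist_partition
    WHERE partmethod = 'h'
    ORDER BY 1;
    -- hash-distributed tables whose shards each have exactly one healthy
    -- placement should now report repmodel 's'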


@ -196,9 +196,6 @@ typedef enum SizeQueryType
} SizeQueryType;
/* Config variable managed via guc.c */
extern int ReplicationModel;
/* Size functions */
extern Datum citus_table_size(PG_FUNCTION_ARGS);
extern Datum citus_total_relation_size(PG_FUNCTION_ARGS);
@ -267,7 +264,6 @@ extern void EnsureFunctionOwner(Oid functionId);
extern void EnsureSuperUser(void);
extern void ErrorIfTableIsACatalogTable(Relation relation);
extern void EnsureTableNotDistributed(Oid relationId);
extern void EnsureReplicationSettings(Oid relationId, char replicationModel);
extern void EnsureRelationExists(Oid relationId);
extern bool RegularTable(Oid relationId);
extern bool RelationUsesIdentityColumns(TupleDesc relationDesc);


@ -3,4 +3,5 @@ test: multi_test_helpers multi_test_helpers_superuser
test: multi_test_catalog_views
test: upgrade_basic_before
test: upgrade_columnar_before
test: upgrade_type_before upgrade_ref2ref_before upgrade_distributed_function_before upgrade_rebalance_strategy_before
test: upgrade_ref2ref_before
test: upgrade_type_before upgrade_distributed_function_before upgrade_rebalance_strategy_before


@ -498,7 +498,7 @@ SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHE
\endif
-- test with metadata sync
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
@ -530,7 +530,6 @@ SELECT table_name, shard_count FROM public.citus_tables WHERE table_name::text =
metadata_sync_table | 8
(1 row)
SET citus.replication_model TO DEFAULT;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
stop_metadata_sync_to_node
---------------------------------------------------------------------


@ -30,7 +30,7 @@ ALTER TABLE citus_local_table ADD CONSTRAINT fkey_local_to_ref FOREIGN KEY(l1) R
SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid IN ('citus_local_table'::regclass, 'reference_table'::regclass) ORDER BY logicalrelid;
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
citus_local_table | n | c
citus_local_table | n | s
reference_table | n | t
(2 rows)
@ -50,7 +50,7 @@ BEGIN;
SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalrelid IN ('citus_local_table'::regclass, 'reference_table'::regclass) ORDER BY logicalrelid;
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
citus_local_table | n | c
citus_local_table | n | s
reference_table | n | t
(2 rows)
@ -76,7 +76,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
ALTER TABLE reference_table DROP COLUMN r1 CASCADE;
@ -100,7 +100,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
ALTER TABLE citus_local_table DROP COLUMN l1 CASCADE;
@ -124,7 +124,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
ALTER TABLE reference_table DROP CONSTRAINT reference_table_pkey CASCADE;
@ -149,7 +149,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
DROP INDEX ref_unique CASCADE;
@ -173,7 +173,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
ALTER TABLE reference_table DROP CONSTRAINT reference_table_r1_key CASCADE;
@ -197,7 +197,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
DROP TABLE reference_table CASCADE;
@ -220,7 +220,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
ALTER TABLE reference_table DROP CONSTRAINT reference_table_r1_key CASCADE;
@ -245,7 +245,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
ref_table_drop_schema.reference_table | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
DROP SCHEMA ref_table_drop_schema CASCADE;
@ -276,7 +276,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
---------------------------------------------------------------------
reference_table_1 | n | t
reference_table_2 | n | t
citus_local_table | n | c
citus_local_table | n | s
(3 rows)
ALTER TABLE reference_table_1 DROP COLUMN r1 CASCADE;
@ -285,7 +285,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
---------------------------------------------------------------------
reference_table_1 | n | t
reference_table_2 | n | t
citus_local_table | n | c
citus_local_table | n | s
(3 rows)
-- local table has multiple foreign keys to two tables
@ -311,7 +311,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
---------------------------------------------------------------------
reference_table_1 | n | t
reference_table_2 | n | t
citus_local_table | n | c
citus_local_table | n | s
(3 rows)
DROP TABLE reference_table_1 CASCADE;
@ -319,7 +319,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table_2 | n | t
citus_local_table | n | c
citus_local_table | n | s
(2 rows)
CREATE TABLE distributed_table (d1 int);
@ -359,7 +359,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
---------------------------------------------------------------------
reference_table_1 | n | t
reference_table_2 | n | t
citus_local_table | n | c
citus_local_table | n | s
(3 rows)
DROP TABLE reference_table_1, reference_table_2 CASCADE;
@ -391,7 +391,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
---------------------------------------------------------------------
reference_table_1 | n | t
reference_table_2 | n | t
citus_local_table | n | c
citus_local_table | n | s
(3 rows)
BEGIN;
@ -413,7 +413,7 @@ BEGIN;
---------------------------------------------------------------------
reference_table_1 | n | t
reference_table_2 | n | t
citus_local_table | n | c
citus_local_table | n | s
(3 rows)
-- this should undistribute citus_local_table again
@ -442,9 +442,9 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table_1 | n | t
citus_local_table_1 | n | c
citus_local_table_2 | n | c
citus_local_table_3 | n | c
citus_local_table_1 | n | s
citus_local_table_2 | n | s
citus_local_table_3 | n | s
(4 rows)
ALTER TABLE reference_table_1 DROP COLUMN r1 CASCADE;
@ -470,9 +470,9 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table_1 | n | t
citus_local_table_1 | n | c
citus_local_table_2 | n | c
citus_local_table_3 | n | c
citus_local_table_1 | n | s
citus_local_table_2 | n | s
citus_local_table_3 | n | s
(4 rows)
-- test DROP OWNED BY
@ -525,7 +525,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table_1 | n | t
citus_local_table_1 | n | c
citus_local_table_1 | n | s
(2 rows)
CREATE OR REPLACE FUNCTION drop_constraint_via_func()
@ -577,7 +577,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table_1 | n | t
citus_local_table_1 | n | c
citus_local_table_1 | n | s
(2 rows)
create or replace procedure drop_constraint_via_proc_top_level()
@ -610,7 +610,7 @@ SELECT logicalrelid, partmethod, repmodel FROM pg_dist_partition WHERE logicalre
logicalrelid | partmethod | repmodel
---------------------------------------------------------------------
reference_table_1 | n | t
citus_local_table_1 | n | c
citus_local_table_1 | n | s
(2 rows)
create or replace procedure drop_constraint_via_proc_exception()


@ -1,5 +1,4 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor to 1;
SET citus.shard_count to 4;
CREATE SCHEMA ch_bench_having;
@ -292,7 +291,6 @@ having (select max(s_order_cnt) > 2 as having_query from stock where s_i_id =
order by s_i_id;
ERROR: Subqueries in HAVING cannot refer to outer query
\c - - - :master_port
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor to 1;
SET citus.shard_count to 4;
SET search_path = ch_bench_having, public;


@ -472,7 +472,7 @@ BEGIN;
SELECT logicalrelid::regclass::text FROM pg_dist_partition, pg_tables
WHERE tablename=logicalrelid::regclass::text AND
schemaname='citus_local_tables_test_schema' AND
partmethod = 'n' AND repmodel = 'c'
partmethod = 'n' AND repmodel = 's'
ORDER BY 1;
logicalrelid
---------------------------------------------------------------------
@ -505,7 +505,7 @@ BEGIN;
SELECT logicalrelid::regclass::text FROM pg_dist_partition, pg_tables
WHERE tablename=logicalrelid::regclass::text AND
schemaname='citus_local_tables_test_schema' AND
partmethod = 'n' AND repmodel = 'c'
partmethod = 'n' AND repmodel = 's'
ORDER BY 1;
logicalrelid
---------------------------------------------------------------------


@ -21,7 +21,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
(1 row)
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE dummy_reference_table(a int unique, b int);
SELECT create_reference_table('dummy_reference_table');
create_reference_table
@ -874,7 +874,7 @@ SELECT reference_table.* FROM reference_table, distributed_table;
TRUNCATE reference_table, citus_local_table, distributed_table;
\c - - - :master_port
SET search_path TO citus_local_table_queries_mx;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
ALTER TABLE reference_table ADD CONSTRAINT pkey_ref PRIMARY KEY (a);
ALTER TABLE citus_local_table ADD CONSTRAINT pkey_c PRIMARY KEY (a);
-- define a foreign key chain distributed table -> reference table -> citus local table
@ -918,7 +918,7 @@ NOTICE: truncate cascades to table "distributed_table_xxxxx"
ROLLBACK;
\c - - - :master_port
SET search_path TO citus_local_table_queries_mx;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
ALTER TABLE distributed_table DROP CONSTRAINT fkey_dist_to_ref;
\c - - - :worker_1_port
SET search_path TO citus_local_table_queries_mx;
@ -933,7 +933,7 @@ BEGIN;
ROLLBACK;
\c - - - :master_port
SET search_path TO citus_local_table_queries_mx;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
-- remove uniqueness constraint and dependent foreign key constraint for next tests
ALTER TABLE reference_table DROP CONSTRAINT fkey_ref_to_local;
ALTER TABLE citus_local_table DROP CONSTRAINT pkey_c;


@ -33,7 +33,6 @@ BEGIN
RETURN 1;
END; $$ language plpgsql STABLE;
CREATE TYPE user_data AS (name text, age int);
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE user_info_data (user_id int, u_data user_data, user_index int);
SELECT create_distributed_table('user_info_data', 'user_id');


@ -27,7 +27,6 @@ SELECT create_distributed_function('get_local_node_id_volatile()');
(1 row)
CREATE TYPE user_data AS (name text, age int);
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE user_info_data (user_id int, u_data user_data, user_index int);
SELECT create_distributed_table('user_info_data', 'user_id');


@ -90,11 +90,11 @@ BEGIN;
tablename | partmethod | repmodel
---------------------------------------------------------------------
citus_local_table_1 | n | t
citus_local_table_2 | n | c
citus_local_table_3 | n | c
citus_local_table_4 | n | c
distributed_table_1 | h | c
partitioned_dist_table_1 | h | c
citus_local_table_2 | n | s
citus_local_table_3 | n | s
citus_local_table_4 | n | s
distributed_table_1 | h | s
partitioned_dist_table_1 | h | s
reference_table_1 | n | t
reference_table_2 | n | t
(8 rows)
@ -120,12 +120,12 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
citus_local_table_1 | n | c
citus_local_table_1 | n | s
citus_local_table_2 | n | t
citus_local_table_3 | n | c
citus_local_table_4 | n | c
distributed_table_1 | h | c
partitioned_dist_table_1 | h | c
citus_local_table_3 | n | s
citus_local_table_4 | n | s
distributed_table_1 | h | s
partitioned_dist_table_1 | h | s
reference_table_1 | n | t
reference_table_2 | n | t
(8 rows)
@ -204,13 +204,13 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
citus_local_table_1 | n | c
citus_local_table_2 | n | c
citus_local_table_3 | n | c
citus_local_table_4 | n | c
citus_local_table_5 | h | c
distributed_table_1 | h | c
partitioned_dist_table_1 | h | c
citus_local_table_1 | n | s
citus_local_table_2 | n | s
citus_local_table_3 | n | s
citus_local_table_4 | n | s
citus_local_table_5 | h | s
distributed_table_1 | h | s
partitioned_dist_table_1 | h | s
reference_table_1 | n | t
reference_table_2 | n | t
(9 rows)
@ -237,13 +237,13 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
citus_local_table_1 | n | c
citus_local_table_2 | n | c
citus_local_table_3 | n | c
citus_local_table_4 | n | c
citus_local_table_5 | h | c
distributed_table_1 | h | c
partitioned_dist_table_1 | h | c
citus_local_table_1 | n | s
citus_local_table_2 | n | s
citus_local_table_3 | n | s
citus_local_table_4 | n | s
citus_local_table_5 | h | s
distributed_table_1 | h | s
partitioned_dist_table_1 | h | s
reference_table_1 | n | t
reference_table_2 | n | t
(9 rows)
@ -272,13 +272,13 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
citus_local_table_1 | n | c
citus_local_table_2 | n | c
citus_local_table_3 | n | c
citus_local_table_4 | n | c
citus_local_table_5 | h | c
distributed_table_1 | h | c
partitioned_dist_table_1 | h | c
citus_local_table_1 | n | s
citus_local_table_2 | n | s
citus_local_table_3 | n | s
citus_local_table_4 | n | s
citus_local_table_5 | h | s
distributed_table_1 | h | s
partitioned_dist_table_1 | h | s
reference_table_1 | n | t
reference_table_2 | n | t
(9 rows)


@ -169,8 +169,7 @@ CREATE AGGREGATE agg_names(x dup_result, yz dup_result) (
SET citus.enable_ddl_propagation TO on;
-- use an unusual type to force a new colocation group
CREATE TABLE statement_table(id int2);
SET citus.replication_model TO 'statement';
SET citus.shard_replication_factor TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('statement_table','id');
create_distributed_table
---------------------------------------------------------------------
@ -179,7 +178,6 @@ SELECT create_distributed_table('statement_table','id');
-- create a table uses streaming-based replication (can be synced)
CREATE TABLE streaming_table(id macaddr);
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('streaming_table','id');
create_distributed_table
@ -228,19 +226,17 @@ select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'pr
-- try to co-locate with a table that uses statement-based replication
SELECT create_distributed_function('increment(int2)', '$1');
ERROR: cannot colocate function "increment" and table "statement_table"
DETAIL: Citus currently only supports colocating function with distributed tables that are created using streaming replication model.
HINT: When distributing tables make sure that citus.replication_model = 'streaming'
ERROR: cannot distribute the function "increment" since there is no table to colocate with
HINT: Provide a distributed table via "colocate_with" option to create_distributed_function()
SELECT create_distributed_function('increment(int2)', '$1', colocate_with := 'statement_table');
ERROR: cannot colocate function "increment" and table "statement_table"
DETAIL: Citus currently only supports colocating function with distributed tables that are created using streaming replication model.
HINT: When distributing tables make sure that citus.replication_model = 'streaming'
HINT: When distributing tables make sure that citus.shard_replication_factor = 1
BEGIN;
SET LOCAL citus.replication_model TO 'statement';
DROP TABLE statement_table;
SELECT create_distributed_function('increment(int2)', '$1');
ERROR: cannot create a function with a distribution argument when citus.replication_model is 'statement'
HINT: Set citus.replication_model to 'streaming' before creating distributed tables
ERROR: cannot distribute the function "increment" since there is no table to colocate with
HINT: Provide a distributed table via "colocate_with" option to create_distributed_function()
END;
-- try to co-locate with a table that uses streaming replication
SELECT create_distributed_function('dup(macaddr)', '$1', colocate_with := 'streaming_table');
@ -649,7 +645,6 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)','$1')
-- a function cannot be colocated with a table that is not "streaming" replicated
SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated_table_func_test (a macaddr);
SET citus.replication_model TO "statement";
SELECT create_distributed_table('replicated_table_func_test', 'a');
create_distributed_table
---------------------------------------------------------------------
@ -659,7 +654,7 @@ SELECT create_distributed_table('replicated_table_func_test', 'a');
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$1', colocate_with:='replicated_table_func_test');
ERROR: cannot colocate function "eq_with_param_names" and table "replicated_table_func_test"
DETAIL: Citus currently only supports colocating function with distributed tables that are created using streaming replication model.
HINT: When distributing tables make sure that citus.replication_model = 'streaming'
HINT: When distributing tables make sure that citus.shard_replication_factor = 1
SELECT public.wait_until_metadata_sync(30000);
wait_until_metadata_sync
---------------------------------------------------------------------
@ -670,7 +665,6 @@ SELECT public.wait_until_metadata_sync(30000);
-- as long as there is a coercion path
SET citus.shard_replication_factor TO 1;
CREATE TABLE replicated_table_func_test_2 (a macaddr8);
SET citus.replication_model TO "streaming";
SELECT create_distributed_table('replicated_table_func_test_2', 'a');
create_distributed_table
---------------------------------------------------------------------
@ -694,7 +688,6 @@ ERROR: relation replicated_table_func_test_3 is not distributed
-- finally, colocate the function with a distributed table
SET citus.shard_replication_factor TO 1;
CREATE TABLE replicated_table_func_test_4 (a macaddr);
SET citus.replication_model TO "streaming";
SELECT create_distributed_table('replicated_table_func_test_4', 'a');
create_distributed_table
---------------------------------------------------------------------


@ -35,7 +35,6 @@ CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 1500
AS 'citus';
-- procedures are distributed by text arguments, when run in isolation it is not guaranteed a table actually exists.
CREATE TABLE colocation_table(id text);
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('colocation_table','id');
create_distributed_table


@ -418,7 +418,7 @@ COMMIT;
SELECT recover_prepared_transactions();
recover_prepared_transactions
---------------------------------------------------------------------
1
4
(1 row)
SELECT citus.mitmproxy('conn.allow()');


@ -128,41 +128,13 @@ SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where r
{key,value}
(1 row)
-- kill as soon as the coordinator sends COMMIT
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
mitmproxy
---------------------------------------------------------------------
(1 row)
ALTER TABLE test_table ADD COLUMN new_column INT;
-- manually drop & re-create the table for the next tests
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------
(1 row)
-- since we've killed the connection just after
-- the coordinator sends the COMMIT, the command should be applied
-- to the distributed table and the shards on the other worker
-- however, there is no way to recover the failure on the shards
-- that live in the failed worker, since we're running 1PC
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
array_agg
---------------------------------------------------------------------
{key,new_column,value}
(1 row)
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
run_command_on_placements
---------------------------------------------------------------------
(localhost,9060,100800,t,"{key,value}")
(localhost,9060,100802,t,"{key,value}")
(localhost,57637,100801,t,"{key,new_column,value}")
(localhost,57637,100803,t,"{key,new_column,value}")
(4 rows)
-- manually drop & re-create the table for the next tests
DROP TABLE test_table;
SET citus.next_shard_id TO 100800;
SET citus.multi_shard_commit_protocol TO '1pc';
@ -229,8 +201,6 @@ CONTEXT: while executing command on localhost:xxxxx
WARNING: failed to commit transaction on localhost:xxxxx
WARNING: connection not open
CONTEXT: while executing command on localhost:xxxxx
WARNING: could not commit transaction for shard xxxxx on any active node
WARNING: could not commit transaction for shard xxxxx on any active node
SELECT citus.mitmproxy('conn.allow()');
mitmproxy
---------------------------------------------------------------------


@ -20,7 +20,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
(1 row)
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
CREATE TABLE failover_to_local (key int PRIMARY KEY, value varchar(10));
SELECT create_distributed_table('failover_to_local', 'key');


@ -6,7 +6,6 @@ SET SEARCH_PATH = mx_metadata_sync;
SET citus.shard_count TO 2;
SET citus.next_shard_id TO 16000000;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
mitmproxy


@ -6,7 +6,6 @@ SELECT citus.mitmproxy('conn.allow()');
(1 row)
SET citus.shard_replication_factor TO 2;
SET "citus.replication_model" to "statement";
SET citus.shard_count TO 4;
CREATE TABLE partitioned_table (
dist_key bigint,


@ -9,7 +9,7 @@ CREATE VIEW citus_local_tables_in_schema AS
SELECT logicalrelid FROM pg_dist_partition, pg_tables
WHERE tablename=logicalrelid::regclass::text AND
schemaname='fkeys_between_local_ref' AND
partmethod = 'n' AND repmodel = 'c';
partmethod = 'n' AND repmodel = 's';
-- remove coordinator if it is added to pg_dist_node and test
-- behavior when coordinator is not added to metadata
SELECT COUNT(master_remove_node(nodename, nodeport)) < 2
@ -195,10 +195,10 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
local_table_1 | n | c
local_table_2 | n | c
local_table_3 | n | c
local_table_4 | n | c
local_table_1 | n | s
local_table_2 | n | s
local_table_3 | n | s
local_table_4 | n | s
reference_table_1 | n | t
reference_table_2 | n | t
(6 rows)
@ -279,12 +279,12 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
local_table_1 | n | c
local_table_2 | n | c
local_table_3 | n | c
local_table_4 | n | c
local_table_5 | n | c
local_table_6 | n | c
local_table_1 | n | s
local_table_2 | n | s
local_table_3 | n | s
local_table_4 | n | s
local_table_5 | n | s
local_table_6 | n | s
reference_table_1 | n | t
(7 rows)
@ -318,12 +318,12 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
local_table_1 | n | c
local_table_2 | n | c
local_table_3 | n | c
local_table_4 | n | c
local_table_5 | n | c
distributed_table | h | s
local_table_1 | n | s
local_table_2 | n | s
local_table_3 | n | s
local_table_4 | n | s
local_table_5 | n | s
reference_table_1 | n | t
(7 rows)
@ -348,13 +348,13 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
another_schema_fkeys_between_local_ref.local_table_6 | n | c
distributed_table | h | c
local_table_1 | n | c
local_table_2 | n | c
local_table_3 | n | c
local_table_4 | n | c
local_table_5 | n | c
another_schema_fkeys_between_local_ref.local_table_6 | n | s
distributed_table | h | s
local_table_1 | n | s
local_table_2 | n | s
local_table_3 | n | s
local_table_4 | n | s
local_table_5 | n | s
reference_table_1 | n | t
(8 rows)
@ -366,10 +366,10 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
local_table_1 | n | c
local_table_2 | n | c
local_table_4 | n | c
distributed_table | h | s
local_table_1 | n | s
local_table_2 | n | s
local_table_4 | n | s
reference_table_1 | n | t
(5 rows)
@ -395,13 +395,13 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
local_table_1 | n | c
local_table_2 | n | c
local_table_3 | n | c
local_table_4 | n | c
local_table_5 | n | c
local_table_6 | n | c
distributed_table | h | s
local_table_1 | n | s
local_table_2 | n | s
local_table_3 | n | s
local_table_4 | n | s
local_table_5 | n | s
local_table_6 | n | s
reference_table_1 | n | t
(8 rows)
@ -423,12 +423,12 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
local_table_1 | n | c
distributed_table | h | s
local_table_1 | n | s
local_table_2 | n | t
local_table_3 | n | c
local_table_4 | n | c
local_table_5 | n | c
local_table_3 | n | s
local_table_4 | n | s
local_table_5 | n | s
local_table_6 | n | t
reference_table_1 | n | t
(8 rows)
@ -452,7 +452,7 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
distributed_table | h | s
local_table_2 | n | t
local_table_6 | n | t
reference_table_1 | n | t
@ -476,7 +476,7 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
distributed_table | h | s
reference_table_1 | n | t
(2 rows)
@ -501,11 +501,11 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
distributed_table | h | s
local_table_1 | n | t
local_table_2 | h | c
local_table_3 | n | c
local_table_4 | n | c
local_table_2 | h | s
local_table_3 | n | s
local_table_4 | n | s
reference_table_1 | n | t
(6 rows)
@ -528,9 +528,9 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
local_table_1 | n | c
local_table_2 | n | c
distributed_table | h | s
local_table_1 | n | s
local_table_2 | n | s
local_table_3 | n | t
local_table_4 | n | t
reference_table_1 | n | t
@ -562,12 +562,12 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
distributed_table | h | s
local_table_1 | n | t
local_table_2 | h | c
local_table_3 | n | c
local_table_4 | n | c
local_table_5 | h | c
local_table_2 | h | s
local_table_3 | n | s
local_table_4 | n | s
local_table_5 | h | s
reference_table_1 | n | t
(7 rows)
@ -611,12 +611,12 @@ WHERE logicalrelid::text IN (SELECT tablename FROM pg_tables WHERE schemaname='f
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
distributed_table | h | c
distributed_table | h | s
local_table_1 | n | t
local_table_2 | h | c
local_table_3 | n | c
local_table_4 | n | c
local_table_6 | h | c
local_table_2 | h | s
local_table_3 | n | s
local_table_4 | n | s
local_table_6 | h | s
reference_table_1 | n | t
(7 rows)
@ -662,7 +662,7 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
pg_local_1 | n | c
pg_local_1 | n | s
ref_1 | n | t
ref_2 | n | t
(3 rows)
@ -691,7 +691,7 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
pg_local_1 | n | c
pg_local_1 | n | s
ref_1 | n | t
ref_2 | n | t
(3 rows)
@ -720,7 +720,7 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
pg_local_1 | n | c
pg_local_1 | n | s
ref_1 | n | t
ref_2 | n | t
(3 rows)
@ -755,7 +755,7 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
pg_local_3 | n | c
pg_local_3 | n | s
ref_1 | n | t
(2 rows)
@ -767,10 +767,10 @@ BEGIN;
ORDER BY tablename;
tablename | partmethod | repmodel
---------------------------------------------------------------------
pg_local_1 | n | c
pg_local_2 | n | c
pg_local_3 | n | c
pg_local_4 | n | c
pg_local_1 | n | s
pg_local_2 | n | s
pg_local_3 | n | s
pg_local_4 | n | s
ref_1 | n | t
(5 rows)


@ -3,7 +3,6 @@ CREATE SCHEMA insert_select_repartition;
SET search_path TO 'insert_select_repartition';
SET citus.next_shard_id TO 4213581;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- 4 shards, hash distributed.
-- Negate distribution column value.
SET citus.shard_count TO 4;
@ -51,8 +50,6 @@ CREATE TYPE composite_key_type AS (f1 int, f2 text);
-- source
CREATE TABLE source_table(f1 int, key composite_key_type, value int, mapped_key composite_key_type);
SELECT create_distributed_table('source_table', 'key', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
@ -69,8 +66,6 @@ INSERT INTO source_table VALUES (6, (32, 'g'), 50, (8, 'g'));
-- target
CREATE TABLE target_table(f1 int DEFAULT 0, value int, key composite_key_type PRIMARY KEY);
SELECT create_distributed_table('target_table', 'key', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
@ -824,7 +819,6 @@ UPDATE source_table SET b = NULL where b IN (9, 4);
SET citus.shard_replication_factor TO 2;
CREATE TABLE target_table(a int, b int not null);
SELECT create_distributed_table('target_table', 'a', 'range');
NOTICE: using statement-based replication
create_distributed_table
---------------------------------------------------------------------
@ -939,7 +933,6 @@ DROP TABLE source_table, target_table;
-- Range partitioned target's ranges doesn't cover the whole range
--
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO 'statement';
SET citus.shard_count TO 4;
CREATE TABLE source_table(a int, b int);
SELECT create_distributed_table('source_table', 'a');


@ -832,7 +832,6 @@ DEBUG: Subplan XXX_2 will be written to local file
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
DEBUG: Subplan XXX_3 will be sent to localhost:xxxxx
-- append partitioned/heap-type
SET citus.replication_model TO statement;
-- do not print out 'building index pg_toast_xxxxx_index' messages
SET client_min_messages TO DEFAULT;
CREATE TABLE range_partitioned(range_column text, data int);


@ -1,9 +1,6 @@
Parsed test spec with 2 sessions
starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content
create_distributed_table
step s1-load-cache:
TRUNCATE test_repair_placement_vs_modification;
@ -35,7 +32,7 @@ master_copy_shard_placement
step s1-update:
UPDATE test_repair_placement_vs_modification SET y = 5 WHERE x = 5;
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-update: <... completed>
@ -58,9 +55,6 @@ nodeport success result
57638 t 5
starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
create_distributed_table
step s1-load-cache:
TRUNCATE test_repair_placement_vs_modification;
@ -92,7 +86,7 @@ master_copy_shard_placement
step s1-delete:
DELETE FROM test_repair_placement_vs_modification WHERE x = 5;
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-delete: <... completed>
@ -115,9 +109,6 @@ nodeport success result
57638 t
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
create_distributed_table
step s1-load-cache:
TRUNCATE test_repair_placement_vs_modification;
@ -146,7 +137,7 @@ master_copy_shard_placement
step s1-insert:
INSERT INTO test_repair_placement_vs_modification VALUES (5, 10);
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-insert: <... completed>
@ -169,9 +160,6 @@ nodeport success result
57638 t 10
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
create_distributed_table
step s1-load-cache:
TRUNCATE test_repair_placement_vs_modification;
@ -200,7 +188,7 @@ master_copy_shard_placement
step s1-copy:
COPY test_repair_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-copy: <... completed>
@ -223,9 +211,6 @@ nodeport success result
57638 t 5
starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count
create_distributed_table
step s1-load-cache:
TRUNCATE test_repair_placement_vs_modification;
@ -254,7 +239,7 @@ master_copy_shard_placement
step s1-ddl:
CREATE INDEX test_repair_placement_vs_modification_index ON test_repair_placement_vs_modification(x);
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
@ -277,9 +262,6 @@ nodeport success result
57638 t 1
starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content
create_distributed_table
step s1-insert:
INSERT INTO test_repair_placement_vs_modification VALUES (5, 10);
@ -308,7 +290,7 @@ master_copy_shard_placement
step s1-update:
UPDATE test_repair_placement_vs_modification SET y = 5 WHERE x = 5;
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-update: <... completed>
@ -331,9 +313,6 @@ nodeport success result
57638 t 5
starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content
create_distributed_table
step s1-insert:
INSERT INTO test_repair_placement_vs_modification VALUES (5, 10);
@ -362,7 +341,7 @@ master_copy_shard_placement
step s1-delete:
DELETE FROM test_repair_placement_vs_modification WHERE x = 5;
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-delete: <... completed>
@ -385,9 +364,6 @@ nodeport success result
57638 t
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
@ -413,7 +389,7 @@ master_copy_shard_placement
step s1-insert:
INSERT INTO test_repair_placement_vs_modification VALUES (5, 10);
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-insert: <... completed>
@ -436,9 +412,6 @@ nodeport success result
57638 t 10
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
@ -464,7 +437,7 @@ master_copy_shard_placement
step s1-copy:
COPY test_repair_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-copy: <... completed>
@ -487,9 +460,6 @@ nodeport success result
57638 t 5
starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
@ -515,7 +485,7 @@ master_copy_shard_placement
step s1-ddl:
CREATE INDEX test_repair_placement_vs_modification_index ON test_repair_placement_vs_modification(x);
<waiting ...>
step s2-commit:
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
@ -536,367 +506,3 @@ nodeport success result
57637 t 1
57638 t 1
57638 t 1
starting permutation: s1-begin s2-begin s2-copy-placement s1-update-copy-table s2-commit s1-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
master_copy_shard_placement
step s1-update-copy-table:
UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-update-copy-table: <... completed>
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s2-copy-placement s1-delete-copy-table s2-commit s1-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
master_copy_shard_placement
step s1-delete-copy-table:
DELETE FROM test_copy_placement_vs_modification WHERE x = 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-delete-copy-table: <... completed>
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s2-copy-placement s1-insert-copy-table s2-commit s1-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
master_copy_shard_placement
step s1-insert-copy-table:
INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);
<waiting ...>
step s2-commit:
COMMIT;
step s1-insert-copy-table: <... completed>
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s2-copy-placement s1-copy-copy-table s2-commit s1-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
master_copy_shard_placement
step s1-copy-copy-table:
COPY test_copy_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
<waiting ...>
step s2-commit:
COMMIT;
step s1-copy-copy-table: <... completed>
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s2-copy-placement s1-ddl-copy-table s2-commit s1-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
master_copy_shard_placement
step s1-ddl-copy-table:
CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x);
<waiting ...>
step s2-commit:
COMMIT;
step s1-ddl-copy-table: <... completed>
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s2-copy-placement s1-select-copy-table s2-commit s1-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
master_copy_shard_placement
step s1-select-copy-table:
SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;
count
0
step s2-commit:
COMMIT;
step s1-commit:
COMMIT;
starting permutation: s1-begin s2-begin s1-update-copy-table s2-copy-placement s1-commit s2-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s1-update-copy-table:
UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-placement: <... completed>
master_copy_shard_placement
step s2-commit:
COMMIT;
starting permutation: s1-begin s2-begin s1-delete-copy-table s2-copy-placement s1-commit s2-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s1-delete-copy-table:
DELETE FROM test_copy_placement_vs_modification WHERE x = 5;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-placement: <... completed>
master_copy_shard_placement
step s2-commit:
COMMIT;
starting permutation: s1-begin s2-begin s1-insert-copy-table s2-copy-placement s1-commit s2-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s1-insert-copy-table:
INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-placement: <... completed>
master_copy_shard_placement
step s2-commit:
COMMIT;
starting permutation: s1-begin s2-begin s1-copy-copy-table s2-copy-placement s1-commit s2-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s1-copy-copy-table:
COPY test_copy_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-placement: <... completed>
master_copy_shard_placement
step s2-commit:
COMMIT;
starting permutation: s1-begin s2-begin s1-ddl-copy-table s2-copy-placement s1-commit s2-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s1-ddl-copy-table:
CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x);
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
<waiting ...>
step s1-commit:
COMMIT;
step s2-copy-placement: <... completed>
master_copy_shard_placement
step s2-commit:
COMMIT;
starting permutation: s1-begin s2-begin s1-select-copy-table s2-copy-placement s1-commit s2-commit
create_distributed_table
step s1-begin:
BEGIN;
SET LOCAL citus.select_opens_transaction_block TO off;
step s2-begin:
BEGIN;
step s1-select-copy-table:
SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;
count
0
step s2-copy-placement:
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
master_copy_shard_placement
step s1-commit:
COMMIT;
step s2-commit:
COMMIT;
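
The permutations above all exercise the same primitive. As a standalone sketch (reusing this suite's table name and worker ports, which are test fixtures rather than required values), the repair-free placement copy looks like:

SELECT master_copy_shard_placement(
         get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5),
         'localhost', 57637,  -- source placement
         'localhost', 57638,  -- target placement
         do_repair := false,
         transfer_mode := 'block_writes');
-- 'block_writes' takes write locks on the shard for the duration of the copy,
-- which is why the permutations show concurrent DML and the copy waiting on
-- each other in both directions.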


@ -5,21 +5,21 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-create-distributed:
CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text);
SELECT create_distributed_table('test_create_distributed_table', 'test_id');
CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text);
SELECT create_distributed_table('test_create_distributed_table', 'test_id');
create_distributed_table
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -31,20 +31,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-insert:
INSERT INTO restore_table VALUES (1,'hello');
INSERT INTO restore_table VALUES (1,'hello');
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-commit:
COMMIT;
COMMIT;
starting permutation: s1-begin s1-modify-multiple s2-create-restore s1-commit
@ -52,20 +52,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-modify-multiple:
UPDATE restore_table SET data = 'world';
UPDATE restore_table SET data = 'world';
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-commit:
COMMIT;
COMMIT;
starting permutation: s1-begin s1-ddl s2-create-restore s1-commit
@ -73,41 +73,42 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-ddl:
ALTER TABLE restore_table ADD COLUMN x int;
ALTER TABLE restore_table ADD COLUMN x int;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
1
step s1-commit:
COMMIT;
starting permutation: s1-begin s1-copy s2-create-restore s1-commit
create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-copy:
COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV;
COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-commit:
COMMIT;
COMMIT;
starting permutation: s1-begin s1-recover s2-create-restore s1-commit
@ -115,20 +116,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-recover:
SELECT recover_prepared_transactions();
SELECT recover_prepared_transactions();
recover_prepared_transactions
0
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -140,17 +141,17 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-drop:
DROP TABLE restore_table;
DROP TABLE restore_table;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -162,20 +163,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-add-node:
SELECT 1 FROM master_add_inactive_node('localhost', 9999);
SELECT 1 FROM master_add_inactive_node('localhost', 9999);
?column?
1
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -187,20 +188,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-remove-node:
SELECT master_remove_node('localhost', 9999);
SELECT master_remove_node('localhost', 9999);
master_remove_node
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -212,20 +213,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test-2');
SELECT 1 FROM citus_create_restore_point('citus-test-2');
?column?
1
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -237,19 +238,19 @@ create_reference_table
step s2-begin:
BEGIN;
BEGIN;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-modify-multiple:
UPDATE restore_table SET data = 'world';
UPDATE restore_table SET data = 'world';
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-modify-multiple: <... completed>
@ -258,19 +259,19 @@ create_reference_table
step s2-begin:
BEGIN;
BEGIN;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-ddl:
ALTER TABLE restore_table ADD COLUMN x int;
ALTER TABLE restore_table ADD COLUMN x int;
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
@ -279,23 +280,23 @@ create_reference_table
step s2-begin:
BEGIN;
BEGIN;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-multi-statement:
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
INSERT INTO restore_table VALUES (1,'hello');
INSERT INTO restore_table VALUES (2,'hello');
COMMIT;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
INSERT INTO restore_table VALUES (1,'hello');
INSERT INTO restore_table VALUES (2,'hello');
COMMIT;
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-multi-statement: <... completed>
@ -304,21 +305,21 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-create-reference:
CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text);
SELECT create_reference_table('test_create_reference_table');
CREATE TABLE test_create_reference_table (test_id integer NOT NULL, data text);
SELECT create_reference_table('test_create_reference_table');
create_reference_table
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -330,20 +331,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-insert-ref:
INSERT INTO restore_ref_table VALUES (1,'hello');
INSERT INTO restore_ref_table VALUES (1,'hello');
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-commit:
COMMIT;
COMMIT;
starting permutation: s1-begin s1-modify-multiple-ref s2-create-restore s1-commit
@ -351,20 +352,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-modify-multiple-ref:
UPDATE restore_ref_table SET data = 'world';
UPDATE restore_ref_table SET data = 'world';
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-commit:
COMMIT;
COMMIT;
starting permutation: s1-begin s1-ddl-ref s2-create-restore s1-commit
@ -372,17 +373,17 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-ddl-ref:
ALTER TABLE restore_ref_table ADD COLUMN x int;
ALTER TABLE restore_ref_table ADD COLUMN x int;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -394,20 +395,20 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-copy-ref:
COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV;
COPY restore_ref_table FROM PROGRAM 'echo 1,hello' WITH CSV;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-commit:
COMMIT;
COMMIT;
starting permutation: s1-begin s1-drop-ref s2-create-restore s1-commit
@ -415,17 +416,17 @@ create_reference_table
step s1-begin:
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
SET citus.multi_shard_commit_protocol TO '2pc';
step s1-drop-ref:
DROP TABLE restore_ref_table;
DROP TABLE restore_ref_table;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;
step s1-commit:
COMMIT;
step s2-create-restore: <... completed>
?column?
@ -437,19 +438,19 @@ create_reference_table
step s2-begin:
BEGIN;
BEGIN;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-modify-multiple-ref:
UPDATE restore_ref_table SET data = 'world';
UPDATE restore_ref_table SET data = 'world';
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-modify-multiple-ref: <... completed>
@ -458,19 +459,19 @@ create_reference_table
step s2-begin:
BEGIN;
BEGIN;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-ddl-ref:
ALTER TABLE restore_ref_table ADD COLUMN x int;
ALTER TABLE restore_ref_table ADD COLUMN x int;
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-ddl-ref: <... completed>
@ -479,22 +480,22 @@ create_reference_table
step s2-begin:
BEGIN;
BEGIN;
step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
SELECT 1 FROM citus_create_restore_point('citus-test');
?column?
1
step s1-multi-statement-ref:
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
INSERT INTO restore_ref_table VALUES (1,'hello');
INSERT INTO restore_ref_table VALUES (2,'hello');
COMMIT;
SET citus.multi_shard_commit_protocol TO '2pc';
BEGIN;
INSERT INTO restore_ref_table VALUES (1,'hello');
INSERT INTO restore_ref_table VALUES (2,'hello');
COMMIT;
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-multi-statement-ref: <... completed>
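
Every permutation in this file funnels through the same restore-point call. A minimal sketch, assuming only a label that is unique among existing restore points (the name below is illustrative):

-- Create a consistent restore point across the coordinator and all workers.
-- As the output above demonstrates, the call waits for in-flight 2PC
-- transactions and, while it runs, blocks new multi-shard writes and DDL.
SELECT citus_create_restore_point('before-maintenance');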


@ -12,7 +12,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -55,7 +55,7 @@ step s1-begin:
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -64,13 +64,12 @@ step s2-public-schema:
SET search_path TO public;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-create-table: <... completed>
@ -84,7 +83,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -133,7 +132,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -176,10 +175,10 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -188,13 +187,12 @@ step s2-public-schema:
SET search_path TO public;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-create-table: <... completed>
@ -202,7 +200,7 @@ create_distributed_table
step s2-commit:
COMMIT;
COMMIT;
step s2-print-distributed-objects:
-- print an overview of all distributed objects
@ -211,7 +209,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -260,7 +258,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -303,26 +301,25 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s2-public-schema:
SET search_path TO public;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
create_distributed_table
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-add-worker: <... completed>
?column?
@ -338,7 +335,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -387,7 +384,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -430,7 +427,7 @@ step s1-begin:
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -440,13 +437,12 @@ step s2-create-schema:
SET search_path TO myschema;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-create-table: <... completed>
@ -460,7 +456,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -510,7 +506,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -553,10 +549,10 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -566,13 +562,12 @@ step s2-create-schema:
SET search_path TO myschema;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-create-table: <... completed>
@ -580,7 +575,7 @@ create_distributed_table
step s2-commit:
COMMIT;
COMMIT;
step s2-print-distributed-objects:
-- print an overview of all distributed objects
@ -589,7 +584,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -639,7 +634,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -682,27 +677,26 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s2-create-schema:
CREATE SCHEMA myschema;
SET search_path TO myschema;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
create_distributed_table
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-add-worker: <... completed>
?column?
@ -718,7 +712,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -768,7 +762,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -815,34 +809,33 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s3-begin:
BEGIN;
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
1
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
<waiting ...>
step s3-use-schema:
step s3-use-schema:
SET search_path TO myschema;
step s3-create-table:
CREATE TABLE t2 (a int, b int);
CREATE TABLE t2 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t2', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t2', 'a');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-create-table: <... completed>
@ -850,14 +843,14 @@ create_distributed_table
step s2-commit:
COMMIT;
COMMIT;
step s3-create-table: <... completed>
create_distributed_table
step s3-commit:
COMMIT;
COMMIT;
step s2-print-distributed-objects:
-- print an overview of all distributed objects
@ -866,7 +859,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -916,7 +909,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -956,7 +949,7 @@ master_remove_node
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -966,39 +959,38 @@ step s2-create-schema:
SET search_path TO myschema;
step s2-begin:
BEGIN;
BEGIN;
step s3-begin:
BEGIN;
BEGIN;
step s3-use-schema:
SET search_path TO myschema;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
create_distributed_table
step s3-create-table:
CREATE TABLE t2 (a int, b int);
CREATE TABLE t2 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t2', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t2', 'a');
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s3-create-table: <... completed>
create_distributed_table
step s3-commit:
COMMIT;
COMMIT;
step s2-print-distributed-objects:
-- print an overview of all distributed objects
@ -1007,7 +999,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1057,7 +1049,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1100,13 +1092,13 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s3-begin:
BEGIN;
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -1120,19 +1112,18 @@ step s3-create-schema2:
SET search_path TO myschema2;
step s2-create-table:
CREATE TABLE t1 (a int, b int);
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
<waiting ...>
step s3-create-table:
CREATE TABLE t2 (a int, b int);
step s3-create-table:
CREATE TABLE t2 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t2', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t2', 'a');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-create-table: <... completed>
@ -1144,10 +1135,10 @@ create_distributed_table
step s3-commit:
COMMIT;
COMMIT;
step s2-commit:
COMMIT;
COMMIT;
step s2-print-distributed-objects:
-- print an overview of all distributed objects
@ -1156,7 +1147,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1207,7 +1198,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1250,7 +1241,7 @@ step s1-begin:
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -1259,9 +1250,9 @@ step s2-public-schema:
SET search_path TO public;
step s2-create-type:
CREATE TYPE tt1 AS (a int, b int);
CREATE TYPE tt1 AS (a int, b int);
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-create-type: <... completed>
@ -1272,7 +1263,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1322,7 +1313,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1368,10 +1359,10 @@ step s2-public-schema:
SET search_path TO public;
step s2-create-type:
CREATE TYPE tt1 AS (a int, b int);
CREATE TYPE tt1 AS (a int, b int);
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -1386,7 +1377,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1436,7 +1427,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1479,30 +1470,29 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s2-create-schema:
CREATE SCHEMA myschema;
SET search_path TO myschema;
step s2-create-type:
CREATE TYPE tt1 AS (a int, b int);
CREATE TYPE tt1 AS (a int, b int);
step s2-create-table-with-type:
CREATE TABLE t1 (a int, b tt1);
CREATE TABLE t1 (a int, b tt1);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
create_distributed_table
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
<waiting ...>
step s2-commit:
COMMIT;
step s2-commit:
COMMIT;
step s1-add-worker: <... completed>
?column?
@ -1518,7 +1508,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1569,7 +1559,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1612,7 +1602,7 @@ step s1-begin:
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -1624,7 +1614,7 @@ step s2-distribute-function:
CREATE OR REPLACE FUNCTION add (INT,INT) RETURNS INT AS $$ SELECT $1 + $2 $$ LANGUAGE SQL;
SELECT create_distributed_function('add(INT,INT)', '$1');
<waiting ...>
step s1-commit:
step s1-commit:
COMMIT;
step s2-distribute-function: <... completed>
@ -1632,10 +1622,10 @@ create_distributed_function
step s2-begin:
BEGIN;
BEGIN;
step s2-commit:
COMMIT;
COMMIT;
step s3-wait-for-metadata-sync:
SELECT public.wait_until_metadata_sync(5000);
@ -1650,7 +1640,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1700,7 +1690,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1753,10 +1743,10 @@ create_distributed_function
step s2-begin:
BEGIN;
BEGIN;
step s2-commit:
COMMIT;
COMMIT;
step s3-wait-for-metadata-sync:
SELECT public.wait_until_metadata_sync(5000);
@ -1765,7 +1755,7 @@ wait_until_metadata_sync
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -1786,7 +1776,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1836,7 +1826,7 @@ step s1-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';
@ -1876,7 +1866,7 @@ master_remove_node
step s2-begin:
BEGIN;
BEGIN;
step s2-create-schema:
CREATE SCHEMA myschema;
@ -1890,7 +1880,7 @@ create_distributed_function
step s2-commit:
COMMIT;
COMMIT;
step s3-wait-for-metadata-sync:
SELECT public.wait_until_metadata_sync(5000);
@ -1902,7 +1892,7 @@ step s1-begin:
BEGIN;
step s1-add-worker:
SELECT 1 FROM master_add_node('localhost', 57638);
SELECT 1 FROM master_add_node('localhost', 57638);
?column?
@ -1923,7 +1913,7 @@ step s2-print-distributed-objects:
SELECT count(*) FROM pg_namespace where nspname = 'myschema';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_namespace where nspname = 'myschema';$$);
-- print if the type has been created
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT count(*) FROM pg_type where typname = 'tt1';
SELECT run_command_on_workers($$SELECT count(*) FROM pg_type where typname = 'tt1';$$);
-- print if the function has been created
SELECT count(*) FROM pg_proc WHERE proname='add';


@ -81,11 +81,12 @@ run_commands_on_session_level_connection_to_node
run_commands_on_session_level_connection_to_node
s1: WARNING: canceling statement due to lock timeout
s1: WARNING: Failed to drop 1 old shards out of 1
step s1-drop-marked-shards:
SELECT public.master_defer_delete_shards();
<waiting ...>
s1: WARNING: canceling statement due to lock timeout
s1: WARNING: Failed to drop 1 old shards out of 1
step s1-drop-marked-shards: <... completed>
master_defer_delete_shards
0
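
For reference, a sketch of driving the deferred-drop path by hand; the lock_timeout value is an assumption, since the isolation test induces the timeout through a competing session-level connection:

SET lock_timeout TO '1s';
-- Returns the number of marked shards actually dropped. On a lock timeout it
-- warns ("Failed to drop 1 old shards out of 1") and reports 0, as above;
-- a later retry can drop the shards once the blocking lock is released.
SELECT public.master_defer_delete_shards();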


@ -11,7 +11,6 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupid := 0);
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE reference_table (key int PRIMARY KEY);
DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "reference_table_pkey" for table "reference_table"
SELECT create_reference_table('reference_table');


@ -2,7 +2,6 @@ CREATE SCHEMA local_shard_execution;
SET search_path TO local_shard_execution;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SET citus.next_shard_id TO 1470000;
CREATE TABLE reference_table (key int PRIMARY KEY);
SELECT create_reference_table('reference_table');
@ -1799,7 +1798,6 @@ RESET citus.log_local_commands;
\c - - - :master_port
SET citus.next_shard_id TO 1480000;
-- test both local and remote execution with custom type
SET citus.replication_model TO "streaming";
SET citus.shard_replication_factor TO 1;
CREATE TYPE invite_resp AS ENUM ('yes', 'no', 'maybe');
CREATE TABLE event_responses (


@ -5,7 +5,6 @@ SET citus.log_local_commands TO TRUE;
SET citus.shard_count TO 4;
SET citus.next_shard_id TO 1580000;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE table_1 (key int, value text);
SELECT create_distributed_table('table_1', 'key');
create_distributed_table


@ -3,7 +3,6 @@ CREATE SCHEMA mcsp;
SET search_path TO mcsp;
SET citus.next_shard_id TO 8139000;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'statement';
CREATE TABLE ref_table(a int, b text unique);
SELECT create_reference_table('ref_table');
create_reference_table
@ -36,6 +35,9 @@ SELECT create_distributed_table('history','key');
(1 row)
-- Mark tables as non-mx tables, in order to be able to test master_copy_shard_placement
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('data'::regclass, 'history'::regclass);
INSERT INTO data VALUES ('key-1', 'value-1');
INSERT INTO data VALUES ('key-2', 'value-2');
INSERT INTO history VALUES ('key-1', '2020-02-01', 'old');
@ -107,7 +109,13 @@ SELECT count(*) FROM history;
-- test we can not replicate MX tables
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- metadata sync will fail as we have a statement replicated table
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
ERROR: relation "mcsp.history" does not exist
CONTEXT: while executing command on localhost:xxxxx
-- use streaming replication to enable metadata syncing
UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid IN
('history'::regclass);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------


@ -64,6 +64,7 @@ CREATE VIEW view_on_part_dist AS SELECT * FROM partitioned_distributed_table;
CREATE MATERIALIZED VIEW mat_view_on_part_dist AS SELECT * FROM partitioned_distributed_table;
CREATE FOREIGN TABLE foreign_distributed_table (a int, b int) SERVER fake_fdw_server;
SELECT create_distributed_table('foreign_distributed_table', 'a');
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
create_distributed_table
---------------------------------------------------------------------


@ -81,12 +81,18 @@ SELECT master_get_active_worker_nodes();
-- add some shard placements to the cluster
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
-- test warnings on setting the deprecated guc for replication model
BEGIN;
SET citus.replication_model to 'statement';
NOTICE: Setting citus.replication_model has no effect. Please use citus.shard_replication_factor instead.
DETAIL: Citus determines the replication model based on the replication factor and the replication models of the colocated shards. If a colocated table is present, the replication model is inherited. Otherwise 'streaming' replication is preferred if supported by the replication factor.
ROLLBACK;
SELECT * FROM citus_activate_node('localhost', :worker_2_port);
WARNING: citus.enable_object_propagation is off, not creating distributed objects on worker
DETAIL: distributed objects are only kept in sync when citus.enable_object_propagation is set to on. Newly activated nodes will not get these objects created
citus_activate_node
---------------------------------------------------------------------
3
3
(1 row)
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
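
The NOTICE above spells out the inference rule that replaces the GUC. A short sketch of its two branches, under the assumption that the table names are hypothetical and nothing is colocated with them:

-- Replication factor 1: streaming replication is used, repmodel 's'.
SET citus.shard_replication_factor TO 1;
CREATE TABLE streaming_candidate (key int);
SELECT create_distributed_table('streaming_candidate', 'key');
SELECT repmodel FROM pg_dist_partition
WHERE logicalrelid = 'streaming_candidate'::regclass;  -- expected: s

-- Replication factor above 1: statement-based replication, repmodel 'c'.
SET citus.shard_replication_factor TO 2;
CREATE TABLE statement_candidate (key int);
SELECT create_distributed_table('statement_candidate', 'key');
SELECT repmodel FROM pg_dist_partition
WHERE logicalrelid = 'statement_candidate'::regclass;  -- expected: c
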
@ -260,6 +266,7 @@ ABORT;
\c - postgres - :master_port
SET citus.next_shard_id TO 1220016;
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
SET citus.shard_replication_factor TO 1;
SELECT master_get_active_worker_nodes();
master_get_active_worker_nodes
---------------------------------------------------------------------
@ -322,7 +329,16 @@ SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHER
(8 rows)
CREATE TABLE cluster_management_test_colocated (col_1 text, col_2 int);
-- Check that we warn the user about colocated shards that will not get created for shards that do not have active placements
SELECT create_distributed_table('cluster_management_test_colocated', 'col_1', 'hash', colocate_with=>'cluster_management_test');
WARNING: could not find any shard placements for shardId 1220017
WARNING: could not find any shard placements for shardId 1220019
WARNING: could not find any shard placements for shardId 1220021
WARNING: could not find any shard placements for shardId 1220023
WARNING: could not find any shard placements for shardId 1220025
WARNING: could not find any shard placements for shardId 1220027
WARNING: could not find any shard placements for shardId 1220029
WARNING: could not find any shard placements for shardId 1220031
create_distributed_table
---------------------------------------------------------------------


@ -1037,10 +1037,13 @@ SELECT create_distributed_table('table1_groupG', 'id');
UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid = 'table1_groupG'::regclass;
CREATE TABLE table2_groupG ( id int );
SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'table1_groupG');
ERROR: cannot colocate tables table1_groupg and table2_groupg
DETAIL: Replication models don't match for table1_groupg and table2_groupg.
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE table2_groupG;
CREATE TABLE table2_groupG ( id int );
ERROR: relation "table2_groupg" already exists
SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'NONE');
create_distributed_table
---------------------------------------------------------------------
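
This hunk captures the behavior change directly: colocating with a streaming-replicated table no longer errors out. A hedged replay of the formerly failing case (same fixtures as the test; the repmodel of table1_groupG was flipped to 's' by hand a few lines earlier):

-- Previously raised "cannot colocate ... Replication models don't match";
-- per the inference rule, the new table now inherits the replication model
-- of its colocation group instead.
CREATE TABLE table2_groupG ( id int );
SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'table1_groupG');
SELECT repmodel FROM pg_dist_partition
WHERE logicalrelid = 'table2_groupg'::regclass;  -- expected: s (inherited)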


@ -68,7 +68,7 @@ SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash');
SELECT partmethod, partkey FROM pg_dist_partition
WHERE logicalrelid = 'table_to_distribute'::regclass;
partmethod | partkey
partmethod | partkey
---------------------------------------------------------------------
h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1}
(1 row)
@ -159,6 +159,7 @@ SERVER fake_fdw_server;
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('foreign_table_to_distribute', 'id', 'hash');
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
create_distributed_table
---------------------------------------------------------------------


@ -1,60 +1,9 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360005;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100000;
-- Since we're superuser, we can set the replication model to 'streaming' to
-- create a one-off MX table... but if we forget to set the replication factor to one,
-- we should see an error reminding us to fix that
SET citus.replication_model TO 'streaming';
SELECT create_distributed_table('mx_table_test', 'col1');
ERROR: replication factors above one are incompatible with the streaming replication model
HINT: Try again after reducing "citus.shard_replication_factor" to one or setting "citus.replication_model" to "statement".
-- ok, so now actually create the one-off MX table
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('mx_table_test', 'col1');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass;
repmodel
---------------------------------------------------------------------
s
(1 row)
DROP TABLE mx_table_test;
-- Show that master_create_distributed_table ignores citus.replication_model GUC
CREATE TABLE s_table(a int);
SELECT master_create_distributed_table('s_table', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
-- Show that master_create_worker_shards complains when RF>1 and replication model is streaming
UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid='s_table'::regclass;
SELECT master_create_worker_shards('s_table', 4, 2);
ERROR: using replication factor 2 with the streaming replication model is not supported
DETAIL: The table s_table is marked as streaming replicated and the shard replication factor of streaming replicated tables must be 1.
HINT: Use replication factor 1.
DROP TABLE s_table;
RESET citus.replication_model;
-- Show that create_distributed_table with append and range distributions ignore
-- citus.replication_model GUC
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO streaming;
-- test that range and append distributed tables have coordinator replication
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
@ -69,8 +18,6 @@ SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regcl
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
@ -83,13 +30,9 @@ SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regcl
(1 row)
DROP TABLE repmodel_test;
-- Show that master_create_distributed_table created statement replicated tables no matter
-- what citus.replication_model set to
-- test that deprecated api creates distributed tables with coordinator replication
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
@ -104,9 +47,6 @@ SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regcl
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
@ -121,9 +61,6 @@ SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regcl
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
@ -136,92 +73,7 @@ SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regcl
(1 row)
DROP TABLE repmodel_test;
-- Check that the replication_model overwrite behavior is the same with RF=1
SET citus.shard_replication_factor TO 1;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: Streaming replication is supported only for hash-distributed tables.
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'hash');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'append');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
CREATE TABLE repmodel_test (a int);
SELECT master_create_distributed_table('repmodel_test', 'a', 'range');
NOTICE: using statement-based replication
DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table.
HINT: Use create_distributed_table to use the streaming replication model.
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass;
repmodel
---------------------------------------------------------------------
c
(1 row)
DROP TABLE repmodel_test;
RESET citus.replication_model;
RESET citus.shard_replication_factor;
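The block above exercises the new rule end to end: with the GUC deprecated, repmodel follows only the shard replication factor and the distribution method. A minimal sketch of the decision, assuming a fresh session; rf_sketch is an illustrative name, not part of the test suite:
SET citus.shard_replication_factor TO 1;
CREATE TABLE rf_sketch (a int);
SELECT create_distributed_table('rf_sketch', 'a');  -- hash-distributed, RF=1
SELECT repmodel FROM pg_dist_partition
WHERE logicalrelid = 'rf_sketch'::regclass;         -- 's' (streaming)
DROP TABLE rf_sketch;
SET citus.shard_replication_factor TO 2;
CREATE TABLE rf_sketch (a int);
SELECT create_distributed_table('rf_sketch', 'a');  -- hash-distributed, RF=2
SELECT repmodel FROM pg_dist_partition
WHERE logicalrelid = 'rf_sketch'::regclass;         -- 'c' (statement-based)
DROP TABLE rf_sketch;
RESET citus.shard_replication_factor;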
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360025;
-- There should be no table on the worker node
\c - - :public_worker_1_host :worker_1_port
@ -339,7 +191,7 @@ SELECT create_distributed_table('unlogged_table', 'key');
(1 row)
SELECT * FROM master_get_table_ddl_events('unlogged_table');
master_get_table_ddl_events
master_get_table_ddl_events
---------------------------------------------------------------------
CREATE UNLOGGED TABLE public.unlogged_table (key text, value text)
ALTER TABLE public.unlogged_table OWNER TO postgres
View File
@ -12,7 +12,6 @@
CREATE SCHEMA functions_in_joins;
SET search_path TO 'functions_in_joins';
SET citus.next_shard_id TO 2500000;
SET citus.replication_model to 'streaming';
SET citus.shard_replication_factor to 1;
CREATE TABLE table1 (id int, data int);
SELECT create_distributed_table('table1','id');
View File
@ -12,7 +12,6 @@
CREATE SCHEMA functions_in_joins;
SET search_path TO 'functions_in_joins';
SET citus.next_shard_id TO 2500000;
SET citus.replication_model to 'streaming';
SET citus.shard_replication_factor to 1;
CREATE TABLE table1 (id int, data int);
SELECT create_distributed_table('table1','id');
View File
@ -332,7 +332,6 @@ SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table':
-- Since we're superuser, we can set the replication model to 'streaming' to
-- create some MX tables
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA mx_testing_schema_2;
CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3));
CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text,
@ -358,7 +357,7 @@ SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-- Check that foreign key metadata exists on the worker
\c - - - :worker_1_port
SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass;
Constraint | Definition
Constraint | Definition
---------------------------------------------------------------------
fk_test_2_col1_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3)
(1 row)
@ -367,7 +366,6 @@ SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schem
DROP TABLE mx_testing_schema_2.fk_test_2;
DROP TABLE mx_testing_schema.fk_test_1;
RESET citus.shard_replication_factor;
RESET citus.replication_model;
-- Check that repeated calls to start_metadata_sync_to_node has no side effects
\c - - - :master_port
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
@ -473,7 +471,6 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port;
-- Check that the distributed table can be queried from the worker
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
@ -564,7 +561,6 @@ CREATE SCHEMA mx_test_schema_1;
CREATE SCHEMA mx_test_schema_2;
-- Create MX tables
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text);
CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1);
CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text);
@ -814,7 +810,6 @@ SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gse
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000;
SET citus.shard_count TO 7;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE mx_colocation_test_1 (a int);
SELECT create_distributed_table('mx_colocation_test_1', 'a');
create_distributed_table
@ -907,7 +902,6 @@ DROP TABLE mx_colocation_test_2;
\c - - - :master_port
SET citus.shard_count TO 7;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE mx_temp_drop_test (a int);
SELECT create_distributed_table('mx_temp_drop_test', 'a');
create_distributed_table
@ -940,7 +934,6 @@ DROP TABLE mx_temp_drop_test;
\c - - - :master_port
SET citus.shard_count TO 3;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
stop_metadata_sync_to_node
---------------------------------------------------------------------
@ -981,7 +974,6 @@ INSERT INTO mx_table_with_small_sequence VALUES (0);
INSERT INTO mx_table_with_small_sequence VALUES (1), (3);
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- Create an MX table with (BIGSERIAL) sequences
CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL);
SELECT create_distributed_table('mx_table_with_sequence', 'a');
@ -1203,7 +1195,6 @@ HINT: Connect to worker nodes directly to manually create all necessary users a
-- Create an mx table as a different user
CREATE TABLE mx_table (a int, b BIGSERIAL);
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT create_distributed_table('mx_table', 'a');
create_distributed_table
---------------------------------------------------------------------
@ -1540,7 +1531,6 @@ SELECT pg_reload_conf();
t
(1 row)
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
CREATE TABLE dist_table_1(a int);
SELECT create_distributed_table('dist_table_1', 'a');
@ -1627,7 +1617,6 @@ DROP TABLE mx_ref;
DROP TABLE dist_table_1, dist_table_2;
RESET citus.shard_count;
RESET citus.shard_replication_factor;
RESET citus.replication_model;
RESET citus.multi_shard_commit_protocol;
ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id;
ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id;
View File
@ -11,7 +11,6 @@ SELECT start_metadata_sync_to_node('localhost', :worker_2_port);
-- Create mx test tables
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE mx_table_1 (a int);
SELECT create_distributed_table('mx_table_1', 'a');
create_distributed_table
@ -232,5 +231,3 @@ DELETE FROM pg_dist_node;
DELETE FROM pg_dist_partition;
DELETE FROM pg_dist_shard;
DELETE FROM pg_dist_shard_placement;
\c - - - :master_port
RESET citus.replication_model;
View File
@ -108,7 +108,6 @@ GRANT USAGE ON SCHEMA full_access_user_schema TO full_access;
GRANT ALL ON SCHEMA full_access_user_schema TO full_access;
GRANT USAGE ON SCHEMA full_access_user_schema TO usage_access;
\c - - - :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
-- create prepare tests
PREPARE prepare_insert AS INSERT INTO test VALUES ($1);
View File
@ -4,7 +4,6 @@ SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 8;
SET citus.next_shard_id TO 7000000;
SET citus.next_placement_id TO 7000000;
SET citus.replication_model TO streaming;
SET client_min_messages TO WARNING;
CREATE USER reprefuser WITH LOGIN;
SELECT run_command_on_workers('CREATE USER reprefuser WITH LOGIN');
View File
@ -2,7 +2,6 @@ CREATE SCHEMA mx_alter_distributed_table;
SET search_path TO mx_alter_distributed_table;
SET citus.shard_replication_factor TO 1;
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1410000;
SET citus.replication_model TO 'streaming';
-- test alter_distributed_table UDF
CREATE TABLE adt_table (a INT, b INT);
CREATE TABLE adt_col (a INT UNIQUE, b INT);
View File
@ -3,7 +3,6 @@ create schema multi_mx_call;
set search_path to multi_mx_call, public;
-- Create worker-local tables to test procedure calls were routed
set citus.shard_replication_factor to 2;
set citus.replication_model to 'statement';
-- This table requires specific settings, create before getting into things
create table mx_call_dist_table_replica(id int, val int);
select create_distributed_table('mx_call_dist_table_replica', 'id');
@ -14,7 +13,6 @@ select create_distributed_table('mx_call_dist_table_replica', 'id');
insert into mx_call_dist_table_replica values (9,1),(8,2),(7,3),(6,4),(5,5);
set citus.shard_replication_factor to 1;
set citus.replication_model to 'streaming';
--
-- Create tables and procedures we want to use in tests
--
View File
@ -140,7 +140,6 @@ CREATE OPERATOR citus_mx_test_schema.=== (
-- connect back to the master, and do some more tests
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
SET search_path TO public;
CREATE TABLE nation_hash(
n_nationkey integer not null,
View File
@ -219,7 +219,6 @@ DROP INDEX ddl_test_index;
-- show that sequences owned by mx tables result in unique values
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 4;
SET citus.replication_model TO streaming;
CREATE TABLE mx_sequence(key INT, value BIGSERIAL);
SELECT create_distributed_table('mx_sequence', 'key');
create_distributed_table
@ -267,7 +266,6 @@ HINT: You can add/drop the member objects on the workers as well.
-- sync table metadata, but skip CREATE TABLE
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 4;
SET citus.replication_model TO streaming;
SELECT create_distributed_table('seg_test', 'x');
NOTICE: Copying data from local table...
NOTICE: copying the data has completed
View File
@ -2,7 +2,6 @@
CREATE SCHEMA multi_mx_function_call_delegation;
SET search_path TO multi_mx_function_call_delegation, public;
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO 'statement';
-- This table requires specific settings, create before getting into things
create table mx_call_dist_table_replica(id int, val int);
select create_distributed_table('mx_call_dist_table_replica', 'id');
@ -13,7 +12,6 @@ select create_distributed_table('mx_call_dist_table_replica', 'id');
insert into mx_call_dist_table_replica values (9,1),(8,2),(7,3),(6,4),(5,5);
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
--
-- Create tables and functions we want to use in tests
--
@ -544,7 +542,7 @@ select start_metadata_sync_to_node('localhost', :worker_2_port);
\c - - - :master_port
SET search_path to multi_mx_function_call_delegation, public;
SET client_min_messages TO DEBUG1;
SET citus.replication_model = 'streaming';
SET citus.shard_replication_factor = 1;
--
-- Test non-const parameter values
--
View File
@ -9,7 +9,6 @@ CREATE SCHEMA function_table_reference;
SET search_path TO function_table_reference;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
View File
@ -24,7 +24,6 @@ CREATE SCHEMA mx_hide_shard_names;
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
@ -139,7 +138,6 @@ SELECT pg_table_is_visible('test_table_1130000'::regclass);
SET search_path TO 'mx_hide_shard_names';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- not existing shard ids appended to the distributed table name
CREATE TABLE test_table_102008(id int, time date);
SELECT create_distributed_table('test_table_102008', 'id');
@ -179,7 +177,6 @@ CREATE SCHEMA mx_hide_shard_names_2;
SET search_path TO 'mx_hide_shard_names_2';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE test_table(id int, time date);
SELECT create_distributed_table('test_table', 'id');
create_distributed_table
@ -236,7 +233,6 @@ SELECT * FROM citus_shard_indexes_on_worker ORDER BY 2;
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA mx_hide_shard_names_3;
SET search_path TO 'mx_hide_shard_names_3';
-- Verify that a table name > 56 characters handled properly.
@ -269,7 +265,6 @@ SELECT * FROM citus_shards_on_worker ORDER BY 2;
\c - - - :master_port
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE SCHEMA "CiTuS.TeeN";
SET search_path TO "CiTuS.TeeN";
CREATE TABLE "TeeNTabLE.1!?!"(id int, "TeNANt_Id" int);
View File
@ -2,7 +2,6 @@
CREATE SCHEMA multi_mx_insert_select_repartition;
SET search_path TO multi_mx_insert_select_repartition;
SET citus.next_shard_id TO 4213581;
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 4;
CREATE TABLE source_table(a int, b int);
View File
@ -29,7 +29,6 @@ CREATE TABLE distributed_mx_table (
);
CREATE INDEX ON distributed_mx_table USING GIN (value);
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
SET citus.shard_count TO 4;
SELECT create_distributed_table('distributed_mx_table', 'key');
create_distributed_table
@ -135,7 +134,6 @@ WHERE logicalrelid = 'distributed_mx_table'::regclass;
-- Create a table and then roll back the transaction
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
BEGIN;
CREATE TABLE should_not_exist (
key text primary key,
@ -159,7 +157,6 @@ SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist';
-- Ensure that we don't allow prepare on a metadata transaction
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
BEGIN;
CREATE TABLE should_not_exist (
key text primary key,
@ -208,7 +205,6 @@ WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass;
\c - - - :master_port
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
-- now show that we can rollback on creating mx table, but shards remain....
BEGIN;
CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts;
View File
@ -6,7 +6,6 @@ CREATE SCHEMA mx_modify_reference_table;
SET search_path TO 'mx_modify_reference_table';
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
View File
@ -5,7 +5,6 @@ SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset
SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset
SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset
SELECT nextval('pg_catalog.pg_dist_shardid_seq') AS last_shard_id \gset
SET citus.replication_model TO streaming;
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
SET citus.replicate_reference_tables_on_activate TO off;
@ -820,4 +819,3 @@ ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placem
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART :last_shard_id;
RESET citus.shard_count;
RESET citus.shard_replication_factor;
RESET citus.replication_model;
View File
@ -5,7 +5,6 @@ SET citus.next_shard_id TO 1700000;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
-- make sure we can create partitioned tables in MX
SET citus.replication_model TO 'streaming';
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
@ -88,7 +87,6 @@ SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'
(2 rows)
\c - - - :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
-- 2-) Creating partition of a distributed table
CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01');
@ -131,7 +129,6 @@ SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'
(3 rows)
\c - - - :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
-- 3-) Attaching non distributed table to a distributed table
CREATE TABLE partitioning_test_2012(id int, time date);
@ -195,7 +192,6 @@ SELECT inhrelid::regclass FROM pg_inherits WHERE inhparent = 'partitioning_test'
(4 rows)
\c - - - :master_port
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
-- 4-) Attaching distributed table to distributed table
CREATE TABLE partitioning_test_2013(id int, time date);
@ -281,7 +277,6 @@ DROP TABLE partitioning_test;
DROP TABLE IF EXISTS partitioning_test_2013;
NOTICE: table "partitioning_test_2013" does not exist, skipping
-- test schema drop with partitioned tables
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
CREATE SCHEMA partition_test;
SET SEARCH_PATH TO partition_test;
View File
@ -779,7 +779,6 @@ ORDER BY
\c - - - :master_port
SET citus.shard_count TO 6;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp);
SELECT create_distributed_table('colocated_table_test', 'value_1');
create_distributed_table
View File
@ -124,7 +124,6 @@ FUNCTION 1 test_udt_hash(test_udt);
\c - - - :master_port
-- Distribute and populate the two tables.
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
SET citus.shard_count TO 3;
SELECT create_distributed_table('repartition_udt', 'pk');
create_distributed_table
View File
@ -381,7 +381,6 @@ CREATE SCHEMA mx_ddl_schema_2;
CREATE SCHEMA "CiTuS.TeAeN";
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- in the first test make sure that we handle DDLs
-- when search path is set
SET search_path TO mx_ddl_schema_1;
View File
@ -1,7 +1,6 @@
-- Tests for running transaction recovery from a worker node
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
CREATE TABLE test_recovery (x text);
SELECT create_distributed_table('test_recovery', 'x');
create_distributed_table
View File
@ -4,7 +4,6 @@ SET citus.next_shard_id TO 2380000;
SET citus.next_placement_id TO 2380000;
SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 6;
SET citus.replication_model TO streaming;
CREATE TABLE "refer'ence_table"(id int PRIMARY KEY);
SELECT create_reference_table('refer''ence_table');
create_reference_table
View File
@ -1815,7 +1815,7 @@ IF EXISTS
partitioning_locks,
partitioning_locks_for_select;
-- make sure we can create a partitioned table with streaming replication
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time);
CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
SELECT create_distributed_table('partitioning_test', 'id');
View File
@ -358,7 +358,6 @@ SELECT create_reference_table('replicate_reference_table_reference_one');
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE replicate_reference_table_reference_two(column1 int);
-- status before master_add_node
SELECT
@ -950,7 +949,7 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port);
1
(1 row)
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
start_metadata_sync_to_node
---------------------------------------------------------------------
View File
@ -11,7 +11,6 @@ SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gse
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 150000;
-- Prepare the environment
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SET citus.shard_count TO 5;
-- Create test tables
CREATE TABLE mx_table (col_1 int, col_2 text, col_3 BIGSERIAL);
@ -387,4 +386,3 @@ SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_
\c - - - :master_port
ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id;
RESET citus.shard_replication_factor;
RESET citus.replication_model;
View File
@ -1,7 +1,6 @@
CREATE SCHEMA mx_coordinator_shouldhaveshards;
SET search_path TO mx_coordinator_shouldhaveshards;
SET citus.shard_replication_factor to 1;
SET citus.replication_model TO streaming;
SET client_min_messages TO WARNING;
SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
?column?
View File
@ -4,7 +4,6 @@ SET citus.shard_replication_factor TO 1;
SET citus.shard_count TO 8;
SET citus.next_shard_id TO 7000000;
SET citus.next_placement_id TO 7000000;
SET citus.replication_model TO streaming;
-- Setup the view so that we can check if the foreign keys are created properly
CREATE TYPE foreign_details AS (name text, relid text, refd_relid text);
CREATE VIEW table_fkeys_in_workers AS
View File
@ -1,7 +1,6 @@
CREATE SCHEMA recursive_dml_queries_mx;
SET search_path TO recursive_dml_queries_mx, public;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO streaming;
CREATE TABLE recursive_dml_queries_mx.distributed_table (tenant_id text, dept int, info jsonb);
SELECT create_distributed_table('distributed_table', 'tenant_id');
create_distributed_table
@ -171,4 +170,3 @@ DETAIL: drop cascades to table distributed_table
drop cascades to table second_distributed_table
drop cascades to table reference_table
RESET citus.shard_replication_factor;
RESET citus.replication_model;
View File
@ -156,13 +156,15 @@ DROP TABLE citus_local_table;
CREATE TABLE dist_table_test_2(a int);
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO "statement";
SELECT create_distributed_table('dist_table_test_2', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Mark tables as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('dist_table_test_2'::regclass);
-- replicate_table_shards should fail when the hostname GUC is set to a non-reachable node
ALTER SYSTEM SET citus.local_hostname TO 'foobar';
SELECT pg_reload_conf();
@ -209,7 +211,6 @@ NOTICE: Copying shard xxxxx from localhost:xxxxx to localhost:xxxxx ...
DROP TABLE dist_table_test, dist_table_test_2, ref_table_test;
RESET citus.shard_count;
RESET citus.shard_replication_factor;
RESET citus.replication_model;
-- Create a user to test multiuser usage of rebalancer functions
-- We explicitly don't create this user on worker nodes yet, so we can
-- test some more error handling. We create them there later.
@ -2207,7 +2208,6 @@ SET client_min_messages TO WARNING;
CREATE TABLE dist_table_test_3(a int);
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO "statement";
SELECT create_distributed_table('dist_table_test_3', 'a');
create_distributed_table
---------------------------------------------------------------------
@ -2234,6 +2234,11 @@ SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE lo
(1 row)
SET citus.shard_replication_factor TO 2;
SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes');
ERROR: Table 'dist_table_test_3' is streaming replicated. Shards of streaming replicated tables cannot be copied
-- Mark table as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('dist_table_test_3'::regclass);
SELECT replicate_table_shards('dist_table_test_3', max_shard_copies := 4, shard_transfer_mode:='block_writes');
replicate_table_shards
---------------------------------------------------------------------
@ -2457,6 +2462,11 @@ WHERE logicalrelid = 'r1'::regclass;
1
(1 row)
SELECT replicate_table_shards('t1', shard_replication_factor := 2);
ERROR: Table 't1' is streaming replicated. Shards of streaming replicated tables cannot be copied
-- Mark table as coordinator replicated in order to be able to test replicate_table_shards
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('t1'::regclass);
SELECT replicate_table_shards('t1', shard_replication_factor := 2);
replicate_table_shards
---------------------------------------------------------------------
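As both failures above show, replicate_table_shards refuses shards of streaming replicated tables, which is why the tests flip repmodel to 'c' first. A condensed sketch of that pattern; copy_sketch is an illustrative name:
SET citus.shard_replication_factor TO 1;
CREATE TABLE copy_sketch (a int);
SELECT create_distributed_table('copy_sketch', 'a');   -- repmodel 's' under RF=1
SELECT replicate_table_shards('copy_sketch', shard_replication_factor := 2,
                              shard_transfer_mode := 'block_writes');
-- errors: shards of streaming replicated tables cannot be copied
UPDATE pg_dist_partition SET repmodel = 'c'
WHERE logicalrelid = 'copy_sketch'::regclass;          -- mark as coordinator replicated
SELECT replicate_table_shards('copy_sketch', shard_replication_factor := 2,
                              shard_transfer_mode := 'block_writes');  -- now allowed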
View File
@ -3,7 +3,6 @@ SET search_path TO single_node;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 90630500;
SET citus.replication_model TO 'streaming';
-- adding the coordinator as inactive is disallowed
SELECT 1 FROM master_add_inactive_node('localhost', :master_port, groupid => 0);
ERROR: coordinator node cannot be added as inactive node
View File
@ -3,6 +3,7 @@
-- ===================================================================
SET search_path TO subquery_and_ctes;
CREATE TABLE users_table_local AS SELECT * FROM users_table;
SET citus.shard_replication_factor TO 1;
CREATE TABLE dist_table (id int, value int);
SELECT create_distributed_table('dist_table', 'id', colocate_with => 'users_table');
create_distributed_table
View File
@ -135,6 +135,7 @@ CREATE FOREIGN TABLE foreign_table (
full_name text not null default ''
) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true');
SELECT create_distributed_table('foreign_table', 'id');
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined
create_distributed_table
---------------------------------------------------------------------
View File
@ -140,7 +140,6 @@ CREATE TABLE users (
, country_id int references countries(id)
, primary key (org_id, id)
);
SET citus.replication_model to 'streaming';
-- "users" table was implicitly added to citus metadata when defining foreign key,
-- so create_distributed_table would first undistribute it.
-- Show that it works well when changing sequence dependencies on mx workers.
View File
@ -1,6 +1,5 @@
CREATE SCHEMA upgrade_distributed_function_before;
SET search_path TO upgrade_distributed_function_before, public;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE t1 (a int PRIMARY KEY, b int);
SELECT create_distributed_table('t1','a');
View File
@ -31,7 +31,6 @@ setup
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
View File
@ -30,7 +30,6 @@ setup
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
View File
@ -8,17 +8,12 @@ setup
SELECT create_distributed_table('test_repair_placement_vs_modification', 'x');
SELECT get_shard_id_for_distribution_column('test_repair_placement_vs_modification', 5) INTO selected_shard;
SET citus.shard_replication_factor TO 1;
CREATE TABLE test_copy_placement_vs_modification (x int, y int);
SELECT create_distributed_table('test_copy_placement_vs_modification', 'x');
}
teardown
{
DROP TABLE test_repair_placement_vs_modification;
DROP TABLE selected_shard;
DROP TABLE test_copy_placement_vs_modification;
}
session "s1"
@ -66,36 +61,6 @@ step "s1-copy"
COPY test_repair_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
}
step "s1-insert-copy-table"
{
INSERT INTO test_copy_placement_vs_modification VALUES (5, 10);
}
step "s1-update-copy-table"
{
UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5;
}
step "s1-delete-copy-table"
{
DELETE FROM test_copy_placement_vs_modification WHERE x = 5;
}
step "s1-select-copy-table"
{
SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5;
}
step "s1-ddl-copy-table"
{
CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x);
}
step "s1-copy-copy-table"
{
COPY test_copy_placement_vs_modification FROM PROGRAM 'echo 1,1 && echo 2,2 && echo 3,3 && echo 4,4 && echo 5,5' WITH CSV;
}
step "s1-commit"
{
COMMIT;
@ -118,13 +83,6 @@ step "s2-repair-placement"
SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638);
}
step "s2-copy-placement"
{
SELECT master_copy_shard_placement((SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5)),
'localhost', 57637, 'localhost', 57638,
do_repair := false, transfer_mode := 'block_writes');
}
step "s2-commit"
{
COMMIT;
@ -168,19 +126,3 @@ permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-b
permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content"
permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-copy" "s2-commit" "s1-commit" "s2-print-content"
permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count"
// verify that copy placement (do_repair := false) blocks other operations, except SELECT
permutation "s1-begin" "s2-begin" "s2-copy-placement" "s1-update-copy-table" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-copy-placement" "s1-delete-copy-table" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-copy-placement" "s1-insert-copy-table" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-copy-placement" "s1-copy-copy-table" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-copy-placement" "s1-ddl-copy-table" "s2-commit" "s1-commit"
permutation "s1-begin" "s2-begin" "s2-copy-placement" "s1-select-copy-table" "s2-commit" "s1-commit"
// verify that copy placement (do_repair := false) is blocked by other operations, except SELECT
permutation "s1-begin" "s2-begin" "s1-update-copy-table" "s2-copy-placement" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-delete-copy-table" "s2-copy-placement" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-insert-copy-table" "s2-copy-placement" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-copy-copy-table" "s2-copy-placement" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-ddl-copy-table" "s2-copy-placement" "s1-commit" "s2-commit"
permutation "s1-begin" "s2-begin" "s1-select-copy-table" "s2-copy-placement" "s1-commit" "s2-commit"
View File
@ -91,7 +91,6 @@ step "s2-create-table"
{
CREATE TABLE t1 (a int, b int);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
}
@ -105,7 +104,6 @@ step "s2-create-table-with-type"
{
CREATE TABLE t1 (a int, b tt1);
-- session needs to have replication factor set to 1, can't do in setup
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('t1', 'a');
}
View File
@ -30,6 +30,5 @@ setup
false)
FROM pg_dist_node;
SET citus.replication_model to streaming;
SET citus.shard_replication_factor TO 1;
}
View File
@ -148,7 +148,7 @@ SELECT table_name::text, shard_count, access_method FROM public.citus_tables WHE
-- test with metadata sync
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
CREATE TABLE metadata_sync_table (a BIGSERIAL);
@ -159,7 +159,6 @@ SELECT alter_distributed_table('metadata_sync_table', shard_count:=8);
SELECT table_name, shard_count FROM public.citus_tables WHERE table_name::text = 'metadata_sync_table';
SET citus.replication_model TO DEFAULT;
SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
-- test complex cascade operations
View File
@ -1,5 +1,4 @@
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1640000;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor to 1;
SET citus.shard_count to 4;
@ -123,7 +122,6 @@ order by s_i_id;
\c - - - :master_port
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor to 1;
SET citus.shard_count to 4;
View File
@ -356,7 +356,7 @@ BEGIN;
SELECT logicalrelid::regclass::text FROM pg_dist_partition, pg_tables
WHERE tablename=logicalrelid::regclass::text AND
schemaname='citus_local_tables_test_schema' AND
partmethod = 'n' AND repmodel = 'c'
partmethod = 'n' AND repmodel = 's'
ORDER BY 1;
ROLLBACK;
@ -376,7 +376,7 @@ BEGIN;
SELECT logicalrelid::regclass::text FROM pg_dist_partition, pg_tables
WHERE tablename=logicalrelid::regclass::text AND
schemaname='citus_local_tables_test_schema' AND
partmethod = 'n' AND repmodel = 'c'
partmethod = 'n' AND repmodel = 's'
ORDER BY 1;
ROLLBACK;
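Both checks above now match repmodel = 's' because citus local tables are recorded with the streaming model. A hedged way to verify on any citus local table, assuming citus_add_local_table_to_metadata (the UDF paired with remove_local_tables_from_metadata in this version); citus_local_sketch is an illustrative name:
CREATE TABLE citus_local_sketch (a int);
SELECT citus_add_local_table_to_metadata('citus_local_sketch');
SELECT partmethod, repmodel FROM pg_dist_partition
WHERE logicalrelid = 'citus_local_sketch'::regclass;  -- expected: 'n', 's'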
View File
@ -15,7 +15,7 @@ RESET client_min_messages;
-- start metadata sync to worker 1
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE dummy_reference_table(a int unique, b int);
SELECT create_reference_table('dummy_reference_table');
@ -545,7 +545,7 @@ TRUNCATE reference_table, citus_local_table, distributed_table;
\c - - - :master_port
SET search_path TO citus_local_table_queries_mx;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
ALTER TABLE reference_table ADD CONSTRAINT pkey_ref PRIMARY KEY (a);
ALTER TABLE citus_local_table ADD CONSTRAINT pkey_c PRIMARY KEY (a);
@ -588,7 +588,7 @@ ROLLBACK;
\c - - - :master_port
SET search_path TO citus_local_table_queries_mx;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
ALTER TABLE distributed_table DROP CONSTRAINT fkey_dist_to_ref;
@ -602,7 +602,7 @@ ROLLBACK;
\c - - - :master_port
SET search_path TO citus_local_table_queries_mx;
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
-- remove uniqueness constraint and dependent foreign key constraint for next tests
ALTER TABLE reference_table DROP CONSTRAINT fkey_ref_to_local;
View File
@ -35,7 +35,6 @@ END; $$ language plpgsql STABLE;
CREATE TYPE user_data AS (name text, age int);
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE user_info_data (user_id int, u_data user_data, user_index int);
View File
@ -29,7 +29,6 @@ SELECT create_distributed_function('get_local_node_id_volatile()');
CREATE TYPE user_data AS (name text, age int);
SET citus.replication_model TO streaming;
SET citus.shard_replication_factor TO 1;
CREATE TABLE user_info_data (user_id int, u_data user_data, user_index int);
View File
@ -167,13 +167,11 @@ SET citus.enable_ddl_propagation TO on;
-- use an unusual type to force a new colocation group
CREATE TABLE statement_table(id int2);
SET citus.replication_model TO 'statement';
SET citus.shard_replication_factor TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('statement_table','id');
-- create a table uses streaming-based replication (can be synced)
CREATE TABLE streaming_table(id macaddr);
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('streaming_table','id');
@ -198,7 +196,6 @@ select bool_or(hasmetadata) from pg_dist_node WHERE isactive AND noderole = 'pr
SELECT create_distributed_function('increment(int2)', '$1');
SELECT create_distributed_function('increment(int2)', '$1', colocate_with := 'statement_table');
BEGIN;
SET LOCAL citus.replication_model TO 'statement';
DROP TABLE statement_table;
SELECT create_distributed_function('increment(int2)', '$1');
END;
@ -377,7 +374,6 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)','$1')
-- a function cannot be colocated with a table that is not "streaming" replicated
SET citus.shard_replication_factor TO 2;
CREATE TABLE replicated_table_func_test (a macaddr);
SET citus.replication_model TO "statement";
SELECT create_distributed_table('replicated_table_func_test', 'a');
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$1', colocate_with:='replicated_table_func_test');
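Per the comment above, only streaming replicated tables qualify as colocation targets for distributed functions, so the RF=2 table should be rejected. A hedged pre-check one might run before attempting the colocation:
SELECT repmodel = 's' AS can_colocate_function
FROM pg_dist_partition
WHERE logicalrelid = 'replicated_table_func_test'::regclass;  -- false here, RF=2 implies 'c'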
@ -387,7 +383,6 @@ SELECT public.wait_until_metadata_sync(30000);
-- as long as there is a coercion path
SET citus.shard_replication_factor TO 1;
CREATE TABLE replicated_table_func_test_2 (a macaddr8);
SET citus.replication_model TO "streaming";
SELECT create_distributed_table('replicated_table_func_test_2', 'a');
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', 'val1', colocate_with:='replicated_table_func_test_2');
@ -401,7 +396,6 @@ SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', 'val
-- finally, colocate the function with a distributed table
SET citus.shard_replication_factor TO 1;
CREATE TABLE replicated_table_func_test_4 (a macaddr);
SET citus.replication_model TO "streaming";
SELECT create_distributed_table('replicated_table_func_test_4', 'a');
SELECT create_distributed_function('eq_with_param_names(macaddr, macaddr)', '$1', colocate_with:='replicated_table_func_test_4');
View File
@ -29,7 +29,6 @@ CREATE OR REPLACE FUNCTION wait_until_metadata_sync(timeout INTEGER DEFAULT 1500
-- procedures are distributed by text arguments; when run in isolation, it is not guaranteed that a table actually exists.
CREATE TABLE colocation_table(id text);
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
SELECT create_distributed_table('colocation_table','id');
View File
@ -64,20 +64,8 @@ ALTER TABLE test_table ADD COLUMN new_column INT;
-- show that we've never committed the changes
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
-- kill as soon as the coordinator sends COMMIT
SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()');
ALTER TABLE test_table ADD COLUMN new_column INT;
SELECT citus.mitmproxy('conn.allow()');
-- since we've killed the connection just after
-- the coordinator sends the COMMIT, the command should be applied
-- to the distributed table and the shards on the other worker
-- however, there is no way to recover the failure on the shards
-- that live in the failed worker, since we're running 1PC
SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass;
SELECT run_command_on_placements('test_table', $$SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = '%s'::regclass;$$) ORDER BY 1;
-- manually drop & re-create the table for the next tests
SELECT citus.mitmproxy('conn.allow()');
DROP TABLE test_table;
SET citus.next_shard_id TO 100800;
SET citus.multi_shard_commit_protocol TO '1pc';
View File
@ -8,7 +8,6 @@ SET citus.next_shard_id TO 1980000;
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
SELECT start_metadata_sync_to_node('localhost', :worker_2_proxy_port);
SET citus.replication_model TO 'streaming';
SET citus.shard_replication_factor TO 1;
CREATE TABLE failover_to_local (key int PRIMARY KEY, value varchar(10));
SELECT create_distributed_table('failover_to_local', 'key');
View File
@ -6,7 +6,6 @@ SET SEARCH_PATH = mx_metadata_sync;
SET citus.shard_count TO 2;
SET citus.next_shard_id TO 16000000;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SELECT pg_backend_pid() as pid \gset
SELECT citus.mitmproxy('conn.allow()');
View File
@ -4,7 +4,6 @@ SELECT citus.mitmproxy('conn.allow()');
SET citus.shard_replication_factor TO 2;
SET "citus.replication_model" to "statement";
SET citus.shard_count TO 4;
CREATE TABLE partitioned_table (
View File
@ -13,7 +13,7 @@ CREATE VIEW citus_local_tables_in_schema AS
SELECT logicalrelid FROM pg_dist_partition, pg_tables
WHERE tablename=logicalrelid::regclass::text AND
schemaname='fkeys_between_local_ref' AND
partmethod = 'n' AND repmodel = 'c';
partmethod = 'n' AND repmodel = 's';
-- remove coordinator if it is added to pg_dist_node and test

View File
SET citus.next_shard_id TO 4213581;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- 4 shards, hash distributed.
-- Negate distribution column value.
@ -451,7 +450,6 @@ DROP TABLE source_table, target_table;
--
SET citus.shard_replication_factor TO 2;
SET citus.replication_model TO 'statement';
SET citus.shard_count TO 4;
CREATE TABLE source_table(a int, b int);
SELECT create_distributed_table('source_table', 'a');
View File
@ -497,8 +497,6 @@ INSERT INTO table_1
-- append partitioned/heap-type
SET citus.replication_model TO statement;
-- do not print out 'building index pg_toast_xxxxx_index' messages
SET client_min_messages TO DEFAULT;
CREATE TABLE range_partitioned(range_column text, data int);
View File
@ -9,8 +9,6 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupid := 0);
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE reference_table (key int PRIMARY KEY);
SELECT create_reference_table('reference_table');
View File
@ -3,7 +3,6 @@ SET search_path TO local_shard_execution;
SET citus.shard_count TO 4;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
SET citus.next_shard_id TO 1470000;
CREATE TABLE reference_table (key int PRIMARY KEY);
@ -877,7 +876,6 @@ RESET citus.log_local_commands;
\c - - - :master_port
SET citus.next_shard_id TO 1480000;
-- test both local and remote execution with custom type
SET citus.replication_model TO "streaming";
SET citus.shard_replication_factor TO 1;
CREATE TYPE invite_resp AS ENUM ('yes', 'no', 'maybe');
View File
@ -6,7 +6,6 @@ SET citus.log_local_commands TO TRUE;
SET citus.shard_count TO 4;
SET citus.next_shard_id TO 1580000;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
CREATE TABLE table_1 (key int, value text);
SELECT create_distributed_table('table_1', 'key');
View File
@ -3,7 +3,6 @@ CREATE SCHEMA mcsp;
SET search_path TO mcsp;
SET citus.next_shard_id TO 8139000;
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'statement';
CREATE TABLE ref_table(a int, b text unique);
SELECT create_reference_table('ref_table');
@ -25,6 +24,10 @@ CREATE TABLE history_p1 PARTITION OF history FOR VALUES FROM ('2019-01-01') TO (
CREATE TABLE history_p2 PARTITION OF history FOR VALUES FROM ('2020-01-01') TO ('2021-01-01');
SELECT create_distributed_table('history','key');
-- Mark tables as non-mx tables, in order to be able to test master_copy_shard_placement
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid IN
('data'::regclass, 'history'::regclass);
INSERT INTO data VALUES ('key-1', 'value-1');
INSERT INTO data VALUES ('key-2', 'value-2');
@ -88,8 +91,13 @@ SELECT count(*) FROM history;
-- test we can not replicate MX tables
SET citus.shard_replication_factor TO 1;
SET citus.replication_model TO 'streaming';
-- metadata sync will fail as we have a statement replicated table
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
-- use streaming replication to enable metadata syncing
UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid IN
('history'::regclass);
SELECT start_metadata_sync_to_node('localhost', :worker_1_port);
CREATE TABLE mx_table(a int);
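Since metadata sync now refuses clusters that still contain statement replicated distributed tables, a hedged helper query to spot the blockers before calling start_metadata_sync_to_node (partmethod 'n' excludes reference and citus local tables):
SELECT logicalrelid::regclass FROM pg_dist_partition
WHERE repmodel = 'c' AND partmethod <> 'n';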
View File
@ -33,6 +33,11 @@ SELECT master_get_active_worker_nodes();
SET citus.shard_count TO 16;
SET citus.shard_replication_factor TO 1;
-- test warnings on setting the deprecated guc for replication model
BEGIN;
SET citus.replication_model to 'statement';
ROLLBACK;
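Beyond the warning, the deprecated GUC no longer steers the outcome. A sketch under the RF=1 setting above; guc_sketch is an illustrative name:
SET citus.replication_model TO 'statement';  -- warns, but has no effect on new tables
CREATE TABLE guc_sketch (a int);
SELECT create_distributed_table('guc_sketch', 'a');
SELECT repmodel FROM pg_dist_partition
WHERE logicalrelid = 'guc_sketch'::regclass;  -- still 's' under RF=1
RESET citus.replication_model;
DROP TABLE guc_sketch;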
SELECT * FROM citus_activate_node('localhost', :worker_2_port);
CREATE TABLE cluster_management_test (col_1 text, col_2 int);
SELECT create_distributed_table('cluster_management_test', 'col_1', 'hash');
@ -102,6 +107,7 @@ ABORT;
\c - postgres - :master_port
SET citus.next_shard_id TO 1220016;
SET citus.enable_object_propagation TO off; -- prevent object propagation on add node during setup
SET citus.shard_replication_factor TO 1;
SELECT master_get_active_worker_nodes();
-- restore the node for next tests
@ -123,6 +129,7 @@ SELECT master_get_active_worker_nodes();
UPDATE pg_dist_placement SET shardstate=4 WHERE groupid=:worker_2_group;
SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port;
CREATE TABLE cluster_management_test_colocated (col_1 text, col_2 int);
-- Check that we warn the user about colocated shards that will not get created for shards that do not have active placements
SELECT create_distributed_table('cluster_management_test_colocated', 'col_1', 'hash', colocate_with=>'cluster_management_test');
-- Check that colocated shards don't get created for shards that are to be deleted
View File
@ -429,6 +429,7 @@ UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid = 'table1_groupG'
CREATE TABLE table2_groupG ( id int );
SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'table1_groupG');
DROP TABLE table2_groupG;
CREATE TABLE table2_groupG ( id int );
SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'NONE');
Some files were not shown because too many files have changed in this diff.