Merge remote-tracking branch 'origin/users/niupre/BlockingSplitAPI' into users/saawasek/non_blocking_split_integrated

users/saawasek/non_blocking_split_integrated
Sameer Awasekar 2022-07-10 19:35:36 +05:30
commit fd46d8011d
155 changed files with 4857 additions and 3938 deletions

View File

@ -1,3 +1,12 @@
### citus v11.0.3 (July 5, 2022) ###
* Fixes a bug that prevents adding local tables with materialized views to
Citus metadata
* Fixes a bug that prevents using `COMPRESSION` and `CONSTRAINT` on a column
* Fixes upgrades to Citus 11 when there are no nodes in the metadata
### citus v11.0.2 (June 15, 2022) ###
* Drops support for PostgreSQL 12

View File

@ -35,6 +35,12 @@ why we ask this as well as instructions for how to proceed, see the
# Optionally, you might instead want to use `make install-all`
# since `multi_extension` regression test would fail due to missing downgrade scripts.
cd src/test/regress
pip install pipenv
pipenv --rm
pipenv install
pipenv shell
make check
```
@ -53,7 +59,7 @@ why we ask this as well as instructions for how to proceed, see the
autoconf flex git libcurl4-gnutls-dev libicu-dev \
libkrb5-dev liblz4-dev libpam0g-dev libreadline-dev \
libselinux1-dev libssl-dev libxslt1-dev libzstd-dev \
make uuid-dev mitmproxy
make uuid-dev
```
2. Get, build, and test the code
@ -67,6 +73,12 @@ why we ask this as well as instructions for how to proceed, see the
# Optionally, you might instead want to use `sudo make install-all`
# since `multi_extension` regression test would fail due to missing downgrade scripts.
cd src/test/regress
pip install pipenv
pipenv --rm
pipenv install
pipenv shell
make check
```
@ -111,6 +123,12 @@ why we ask this as well as instructions for how to proceed, see the
# Optionally, you might instead want to use `sudo make install-all`
# since `multi_extension` regression test would fail due to missing downgrade scripts.
cd src/test/regress
pip install pipenv
pipenv --rm
pipenv install
pipenv shell
make check
```

View File

@ -277,6 +277,11 @@ ColumnarSetRelPathlistHook(PlannerInfo *root, RelOptInfo *rel, Index rti,
* into the scan of the table to minimize the data read.
*/
Relation relation = RelationIdGetRelation(rte->relid);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", rte->relid)));
}
if (relation->rd_tableam == GetColumnarTableAmRoutine())
{
if (rte->tablesample != NULL)
@ -501,6 +506,11 @@ ColumnarIndexScanAdditionalCost(PlannerInfo *root, RelOptInfo *rel,
&indexCorrelation, &fakeIndexPages);
Relation relation = RelationIdGetRelation(relationId);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
uint64 rowCount = ColumnarTableRowCount(relation);
RelationClose(relation);
double estimatedRows = rowCount * indexSelectivity;
@ -596,6 +606,11 @@ static int
RelationIdGetNumberOfAttributes(Oid relationId)
{
Relation relation = RelationIdGetRelation(relationId);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
int nattrs = relation->rd_att->natts;
RelationClose(relation);
return nattrs;
@ -1399,6 +1414,11 @@ static Cost
ColumnarPerStripeScanCost(RelOptInfo *rel, Oid relationId, int numberOfColumnsRead)
{
Relation relation = RelationIdGetRelation(relationId);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
List *stripeList = StripesForRelfilenode(relation->rd_node);
RelationClose(relation);
@ -1451,6 +1471,11 @@ static uint64
ColumnarTableStripeCount(Oid relationId)
{
Relation relation = RelationIdGetRelation(relationId);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
List *stripeList = StripesForRelfilenode(relation->rd_node);
int stripeCount = list_length(stripeList);
RelationClose(relation);

View File

@ -0,0 +1 @@
-- no changes needed

View File

@ -0,0 +1 @@
-- no changes needed

View File

@ -207,6 +207,7 @@ static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaNa
char *sourceName,
char *targetSchemaName,
char *targetName);
static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid);
static char * CreateMaterializedViewDDLCommand(Oid matViewOid);
static char * GetAccessMethodForMatViewIfExists(Oid viewOid);
static bool WillRecreateForeignKeyToReferenceTable(Oid relationId,
@ -223,6 +224,8 @@ PG_FUNCTION_INFO_V1(worker_change_sequence_dependency);
/* global variable keeping track of whether we are in a table type conversion function */
bool InTableTypeConversionFunctionCall = false;
/* controlled by GUC, in MB */
int MaxMatViewSizeToAutoRecreate = 1024;
/*
* undistribute_table gets a distributed table name and
@ -854,6 +857,11 @@ static void
DropIndexesNotSupportedByColumnar(Oid relationId, bool suppressNoticeMessages)
{
Relation columnarRelation = RelationIdGetRelation(relationId);
if (!RelationIsValid(columnarRelation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
List *indexIdList = RelationGetIndexList(columnarRelation);
/*
@ -1357,6 +1365,7 @@ List *
GetViewCreationCommandsOfTable(Oid relationId)
{
List *views = GetDependingViews(relationId);
List *commands = NIL;
Oid viewOid = InvalidOid;
@ -1367,6 +1376,8 @@ GetViewCreationCommandsOfTable(Oid relationId)
/* See comments on CreateMaterializedViewDDLCommand for its limitations */
if (get_rel_relkind(viewOid) == RELKIND_MATVIEW)
{
ErrorIfMatViewSizeExceedsTheLimit(viewOid);
char *matViewCreateCommands = CreateMaterializedViewDDLCommand(viewOid);
appendStringInfoString(query, matViewCreateCommands);
}
@ -1406,6 +1417,42 @@ GetViewCreationTableDDLCommandsOfTable(Oid relationId)
}
/*
* ErrorIfMatViewSizeExceedsTheLimit takes the oid of a materialized view and errors
* out if the size of the matview exceeds the limit set by the GUC
* citus.max_matview_size_to_auto_recreate.
*/
static void
ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid)
{
if (MaxMatViewSizeToAutoRecreate >= 0)
{
/* if it's below 0, it means the user has removed the limit */
Datum relSizeDatum = DirectFunctionCall1(pg_total_relation_size,
ObjectIdGetDatum(matViewOid));
uint64 matViewSize = DatumGetInt64(relSizeDatum);
/* convert from MB to bytes */
uint64 limitSizeInBytes = MaxMatViewSizeToAutoRecreate * 1024L * 1024L;
if (matViewSize > limitSizeInBytes)
{
ereport(ERROR, (errmsg("size of the materialized view %s exceeds "
"citus.max_matview_size_to_auto_recreate "
"(currently %d MB)", get_rel_name(matViewOid),
MaxMatViewSizeToAutoRecreate),
errdetail("Citus restricts automatically recreating "
"materialized views that are larger than the "
"limit, because it could take too long."),
errhint(
"Consider increasing the size limit by setting "
"citus.max_matview_size_to_auto_recreate; "
"or you can remove the limit by setting it to -1")));
}
}
}
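For context, a minimal sketch of how this limit is controlled from SQL; the GUC name and its -1 sentinel come from this diff, the session-level usage shown is an assumption:

```
-- default is 1024 MB; conversions abort when a dependent matview is larger
SET citus.max_matview_size_to_auto_recreate TO 512;  -- tighten the limit to 512 MB
SET citus.max_matview_size_to_auto_recreate TO -1;   -- remove the limit entirely
```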
/*
* CreateMaterializedViewDDLCommand creates the command to create materialized view.
* Note that this function doesn't support
@ -1967,6 +2014,19 @@ ExecuteQueryViaSPI(char *query, int SPIOK)
}
/*
* ExecuteAndLogQueryViaSPI is a wrapper around ExecuteQueryViaSPI that logs
* the query to be executed, at the given log level.
*/
void
ExecuteAndLogQueryViaSPI(char *query, int SPIOK, int logLevel)
{
ereport(logLevel, (errmsg("executing \"%s\"", query)));
ExecuteQueryViaSPI(query, SPIOK);
}
/*
* SwitchToSequentialAndLocalExecutionIfRelationNameTooLong generates the longest shard name
* on the shards of a distributed table, and if exceeds the limit switches to sequential and

View File

@ -26,6 +26,7 @@
#include "distributed/reference_table_utils.h"
#include "distributed/relation_access_tracking.h"
#include "distributed/worker_protocol.h"
#include "executor/spi.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@ -513,12 +514,12 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList,
/*
* ExecuteAndLogUtilityCommandListInTableTypeConversion is a wrapper function
* around ExecuteAndLogUtilityCommandList, that makes it execute with the flag
* InTableTypeConversionFunctionCall is set to true.
* ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI is a wrapper function
* around ExecuteAndLogQueryViaSPI, that executes view creation commands
* with the flag InTableTypeConversionFunctionCall set to true.
*/
void
ExecuteAndLogUtilityCommandListInTableTypeConversion(List *utilityCommandList)
ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandList)
{
bool oldValue = InTableTypeConversionFunctionCall;
InTableTypeConversionFunctionCall = true;
@ -526,7 +527,15 @@ ExecuteAndLogUtilityCommandListInTableTypeConversion(List *utilityCommandList)
MemoryContext savedMemoryContext = CurrentMemoryContext;
PG_TRY();
{
ExecuteAndLogUtilityCommandList(utilityCommandList);
char *utilityCommand = NULL;
foreach_ptr(utilityCommand, utilityCommandList)
{
/*
* CREATE MATERIALIZED VIEW commands need to be parsed/transformed,
* which SPI does for us.
*/
ExecuteAndLogQueryViaSPI(utilityCommand, SPI_OK_UTILITY, DEBUG1);
}
}
PG_CATCH();
{

View File

@ -349,7 +349,7 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve
* Execute the view creation commands with the shell table.
* Views will be distributed via FinalizeCitusLocalTableCreation below.
*/
ExecuteAndLogUtilityCommandListInTableTypeConversion(tableViewCreationCommands);
ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(tableViewCreationCommands);
/*
* Set shellRelationId as the relation with relationId now points
@ -891,6 +891,11 @@ static void
RenameShardRelationStatistics(Oid shardRelationId, uint64 shardId)
{
Relation shardRelation = RelationIdGetRelation(shardRelationId);
if (!RelationIsValid(shardRelation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", shardRelationId)));
}
List *statsOidList = RelationGetStatExtList(shardRelation);
RelationClose(shardRelation);
@ -1053,7 +1058,9 @@ DropViewsOnTable(Oid relationId)
char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName);
StringInfo dropCommand = makeStringInfo();
appendStringInfo(dropCommand, "DROP VIEW IF EXISTS %s",
appendStringInfo(dropCommand, "DROP %sVIEW IF EXISTS %s",
get_rel_relkind(viewId) == RELKIND_MATVIEW ? "MATERIALIZED " :
"",
qualifiedViewName);
ExecuteAndLogUtilityCommand(dropCommand->data);
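For illustration, with hypothetical object names the generated commands now come out as:

```
DROP VIEW IF EXISTS some_schema.some_view;
DROP MATERIALIZED VIEW IF EXISTS some_schema.some_matview;
```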

View File

@ -1024,6 +1024,14 @@ static DistributeObjectOps Type_Rename = {
.address = RenameTypeStmtObjectAddress,
.markDistributed = false,
};
static DistributeObjectOps Vacuum_Analyze = {
.deparse = NULL,
.qualify = NULL,
.preprocess = NULL,
.postprocess = PostprocessVacuumStmt,
.address = NULL,
.markDistributed = false,
};
/*
* PreprocessRenameViewStmt function can be called internally by ALTER TABLE view_name
@ -1653,6 +1661,11 @@ GetDistributeObjectOps(Node *node)
return &Any_Reindex;
}
case T_VacuumStmt:
{
return &Vacuum_Analyze;
}
case T_RenameStmt:
{
RenameStmt *stmt = castNode(RenameStmt, node);

View File

@ -321,6 +321,11 @@ ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor pgIndexProcesso
List *result = NIL;
Relation relation = RelationIdGetRelation(relationId);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
List *indexIdList = RelationGetIndexList(relation);
Oid indexId = InvalidOid;
foreach_oid(indexId, indexIdList)

View File

@ -48,11 +48,12 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
/*
* The lock levels here should be the same as the ones taken in
* RenameRelation(), renameatt() and RenameConstraint(). However, since all
* four statements have identical lock levels, we just use a single statement.
* RenameRelation(), renameatt() and RenameConstraint(). All statements
* have identical lock levels except ALTER INDEX .. RENAME.
*/
objectRelationId = RangeVarGetRelid(renameStmt->relation,
AccessExclusiveLock,
LOCKMODE lockmode = (IsIndexRenameStmt(renameStmt)) ?
ShareUpdateExclusiveLock : AccessExclusiveLock;
objectRelationId = RangeVarGetRelid(renameStmt->relation, lockmode,
renameStmt->missing_ok);
/*
@ -99,6 +100,18 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
case OBJECT_TABCONSTRAINT:
case OBJECT_POLICY:
{
if (relKind == RELKIND_INDEX ||
relKind == RELKIND_PARTITIONED_INDEX)
{
/*
* Although weird, postgres allows the ALTER TABLE .. RENAME command
* on indexes. We don't want to break non-distributed tables,
* so we allow it.
*/
tableRelationId = IndexGetRelation(objectRelationId, false);
break;
}
/* the target object is our tableRelationId. */
tableRelationId = objectRelationId;
break;
@ -106,6 +119,25 @@ PreprocessRenameStmt(Node *node, const char *renameCommand,
case OBJECT_INDEX:
{
if (relKind == RELKIND_RELATION ||
relKind == RELKIND_PARTITIONED_TABLE)
{
/*
* Although weird, postgres allows the ALTER INDEX .. RENAME command
* on tables. We don't want to break non-distributed tables,
* so we allow it.
* Because of the weird syntax, we initially locked with the wrong level,
* so relock the relation to acquire the correct lock. The same logic
* can be found in the function RenameRelation(RenameStmt) in tablecmds.c.
*/
UnlockRelationOid(objectRelationId, lockmode);
objectRelationId = RangeVarGetRelid(renameStmt->relation,
AccessExclusiveLock,
renameStmt->missing_ok);
tableRelationId = objectRelationId;
break;
}
/*
* here, objRelationId points to the index relation entry, and we
* are interested in the entry of the table on which the index is

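The two cross-syntax cases handled above can be reproduced directly in psql; both statements are accepted by PostgreSQL (object names hypothetical):

```
ALTER TABLE my_index RENAME TO my_index_renamed;  -- ALTER TABLE .. RENAME on an index
ALTER INDEX my_table RENAME TO my_table_renamed;  -- ALTER INDEX .. RENAME on a table
```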
View File

@ -469,6 +469,11 @@ GetExplicitStatisticsCommandList(Oid relationId)
List *explicitStatisticsCommandList = NIL;
Relation relation = RelationIdGetRelation(relationId);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
List *statisticsIdList = RelationGetStatExtList(relation);
RelationClose(relation);
@ -540,6 +545,11 @@ GetExplicitStatisticsSchemaIdList(Oid relationId)
List *schemaIdList = NIL;
Relation relation = RelationIdGetRelation(relationId);
if (!RelationIsValid(relation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", relationId)));
}
List *statsIdList = RelationGetStatExtList(relation);
RelationClose(relation);

View File

@ -830,14 +830,6 @@ ProcessUtilityInternal(PlannedStmt *pstmt,
}
}
/* TODO: fold VACUUM's processing into the above block */
if (IsA(parsetree, VacuumStmt))
{
VacuumStmt *vacuumStmt = (VacuumStmt *) parsetree;
PostprocessVacuumStmt(vacuumStmt, queryString);
}
if (!IsDropCitusExtensionStmt(parsetree) && !IsA(parsetree, DropdbStmt))
{
/*

View File

@ -20,7 +20,7 @@
#include "distributed/deparse_shard_query.h"
#include "distributed/listutils.h"
#include "distributed/metadata_cache.h"
#include "distributed/multi_executor.h"
#include "distributed/metadata_sync.h"
#include "distributed/resource_lock.h"
#include "distributed/transaction_management.h"
#include "distributed/version_compat.h"
@ -48,7 +48,7 @@ typedef struct CitusVacuumParams
} CitusVacuumParams;
/* Local functions forward declarations for processing distributed table commands */
static bool IsDistributedVacuumStmt(int vacuumOptions, List *VacuumCitusRelationIdList);
static bool IsDistributedVacuumStmt(List *vacuumRelationIdList);
static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams,
List *vacuumColumnList);
static char * DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams);
@ -57,44 +57,28 @@ static List * VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex);
static List * ExtractVacuumTargetRels(VacuumStmt *vacuumStmt);
static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
CitusVacuumParams vacuumParams);
static void ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt,
CitusVacuumParams vacuumParams);
static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt);
static List * VacuumCitusRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams
vacuumParams);
static List * VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams
vacuumParams);
/*
* PostprocessVacuumStmt processes vacuum statements that may need propagation to
* distributed tables. If a VACUUM or ANALYZE command references a distributed
* table, it is propagated to all involved nodes; otherwise, this function will
* immediately exit after some error checking.
* citus tables, but only if DDL propagation is enabled. If a VACUUM or ANALYZE command
* references a citus table or no table at all, it is propagated to all involved nodes;
* otherwise, the statement is not propagated.
*
* Unlike most other Process functions within this file, this function does not
* return a modified parse node, as it is expected that the local VACUUM or
* ANALYZE has already been processed.
*/
void
PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
List *
PostprocessVacuumStmt(Node *node, const char *vacuumCommand)
{
VacuumStmt *vacuumStmt = castNode(VacuumStmt, node);
CitusVacuumParams vacuumParams = VacuumStmtParams(vacuumStmt);
const char *stmtName = (vacuumParams.options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
/*
* No table in the vacuum statement means vacuuming all relations
* which is not supported by citus.
*/
if (list_length(vacuumStmt->rels) == 0)
{
/* WARN for unqualified VACUUM commands */
ereport(WARNING, (errmsg("not propagating %s command to worker nodes", stmtName),
errhint("Provide a specific table in order to %s "
"distributed tables.", stmtName)));
}
List *citusRelationIdList = VacuumCitusRelationIdList(vacuumStmt, vacuumParams);
if (list_length(citusRelationIdList) == 0)
{
return;
}
if (vacuumParams.options & VACOPT_VACUUM)
{
@ -109,32 +93,42 @@ PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand)
}
/*
* Here we get the relation list again because we might have
* closed the current transaction and the memory context got reset.
* Vacuum's context is PortalContext, which lasts for the whole session
* so committing/starting a new transaction doesn't affect it.
* when no table is specified, propagate the command as is;
* otherwise, only propagate when at least one citus table is referenced
*/
citusRelationIdList = VacuumCitusRelationIdList(vacuumStmt, vacuumParams);
bool distributedVacuumStmt = IsDistributedVacuumStmt(vacuumParams.options,
citusRelationIdList);
if (!distributedVacuumStmt)
List *relationIdList = VacuumRelationIdList(vacuumStmt, vacuumParams);
if (list_length(vacuumStmt->rels) == 0)
{
return;
/* no table is specified (unqualified vacuum) */
ExecuteUnqualifiedVacuumTasks(vacuumStmt, vacuumParams);
}
else if (IsDistributedVacuumStmt(relationIdList))
{
/* there is at least 1 citus table specified */
ExecuteVacuumOnDistributedTables(vacuumStmt, relationIdList,
vacuumParams);
}
ExecuteVacuumOnDistributedTables(vacuumStmt, citusRelationIdList, vacuumParams);
/* else only local tables are specified */
return NIL;
}
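A sketch of the resulting propagation behaviour, assuming citus.enable_ddl_propagation is on and hypothetical table names:

```
VACUUM dist_table;   -- references a citus table: propagated to its shard placements
VACUUM local_table;  -- only a local table: runs locally, not propagated
VACUUM;              -- no table specified: propagated as-is to all nodes
```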
/*
* VacuumCitusRelationIdList returns the oid of the relations in the given vacuum statement.
* VacuumRelationIdList returns the OIDs of the relations in the given vacuum statement.
*/
static List *
VacuumCitusRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
{
LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock :
ShareUpdateExclusiveLock;
bool skipLocked = (vacuumParams.options & VACOPT_SKIP_LOCKED);
List *vacuumRelationList = ExtractVacuumTargetRels(vacuumStmt);
List *relationIdList = NIL;
@ -142,18 +136,45 @@ VacuumCitusRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams
RangeVar *vacuumRelation = NULL;
foreach_ptr(vacuumRelation, vacuumRelationList)
{
Oid relationId = RangeVarGetRelid(vacuumRelation, lockMode, false);
if (!IsCitusTable(relationId))
/*
* If the skip_locked option is enabled, we skip a relation whose lock
* is currently unavailable; otherwise, we wait for the lock.
*/
Oid relationId = RangeVarGetRelidExtended(vacuumRelation,
lockMode,
skipLocked ? RVR_SKIP_LOCKED : 0, NULL,
NULL);
if (OidIsValid(relationId))
{
continue;
relationIdList = lappend_oid(relationIdList, relationId);
}
relationIdList = lappend_oid(relationIdList, relationId);
}
return relationIdList;
}
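The RVR_SKIP_LOCKED path corresponds to PostgreSQL's SKIP_LOCKED vacuum option, e.g. (table name hypothetical):

```
VACUUM (SKIP_LOCKED) dist_table;  -- relations whose locks are unavailable are skipped
```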
/*
* IsDistributedVacuumStmt returns true if there is any citus table in the relation id list;
* otherwise, it returns false.
*/
static bool
IsDistributedVacuumStmt(List *vacuumRelationIdList)
{
Oid relationId = InvalidOid;
foreach_oid(relationId, vacuumRelationIdList)
{
if (OidIsValid(relationId) && IsCitusTable(relationId))
{
return true;
}
}
return false;
}
/*
* ExecuteVacuumOnDistributedTables executes the vacuum for the shard placements of given tables
* if they are citus tables.
@ -183,53 +204,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList,
}
/*
* IsDistributedVacuumStmt returns whether distributed execution of a
* given VacuumStmt is supported. The provided relationId list represents
* the list of tables targeted by the provided statement.
*
* Returns true if the statement requires distributed execution and returns
* false otherwise.
*/
static bool
IsDistributedVacuumStmt(int vacuumOptions, List *VacuumCitusRelationIdList)
{
bool distributeStmt = false;
int distributedRelationCount = 0;
const char *stmtName = (vacuumOptions & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
Oid relationId = InvalidOid;
foreach_oid(relationId, VacuumCitusRelationIdList)
{
if (OidIsValid(relationId) && IsCitusTable(relationId))
{
distributedRelationCount++;
}
}
if (distributedRelationCount == 0)
{
/* nothing to do here */
}
else if (!EnableDDLPropagation)
{
/* WARN if DDL propagation is not enabled */
ereport(WARNING, (errmsg("not propagating %s command to worker nodes", stmtName),
errhint("Set citus.enable_ddl_propagation to true in order to "
"send targeted %s commands to worker nodes.",
stmtName)));
}
else
{
distributeStmt = true;
}
return distributeStmt;
}
/*
* VacuumTaskList returns a list of tasks to be executed as part of processing
* a VacuumStmt which targets a distributed relation.
@ -237,6 +211,9 @@ IsDistributedVacuumStmt(int vacuumOptions, List *VacuumCitusRelationIdList)
static List *
VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColumnList)
{
LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock :
ShareUpdateExclusiveLock;
/* resulting task list */
List *taskList = NIL;
@ -255,8 +232,20 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum
* RowExclusiveLock. However, if VACUUM FULL is used, we already obtain
* AccessExclusiveLock before reaching this point, and INSERTs will be
* blocked anyway. This is in line with PostgreSQL's own behaviour.
* Also note that if the skip_locked option is enabled, we try to acquire the lock
* in a nonblocking way. If the lock is not available, vacuum just skips that relation.
*/
LockRelationOid(relationId, ShareUpdateExclusiveLock);
if (!(vacuumParams.options & VACOPT_SKIP_LOCKED))
{
LockRelationOid(relationId, lockMode);
}
else
{
if (!ConditionalLockRelationOid(relationId, lockMode))
{
return NIL;
}
}
List *shardIntervalList = LoadShardIntervalList(relationId);
@ -391,10 +380,33 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams)
if (vacuumParams.index_cleanup != VACOPTVALUE_UNSPECIFIED)
{
appendStringInfoString(vacuumPrefix,
vacuumParams.index_cleanup == VACOPTVALUE_ENABLED ?
"INDEX_CLEANUP," : "INDEX_CLEANUP false,"
);
switch (vacuumParams.index_cleanup)
{
case VACOPTVALUE_ENABLED:
{
appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP true,");
break;
}
case VACOPTVALUE_DISABLED:
{
appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP false,");
break;
}
#if PG_VERSION_NUM >= PG_VERSION_14
case VACOPTVALUE_AUTO:
{
appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP auto,");
break;
}
#endif
default:
{
break;
}
}
}
#if PG_VERSION_NUM >= PG_VERSION_13
@ -552,8 +564,32 @@ VacuumStmtParams(VacuumStmt *vacstmt)
#endif
else if (strcmp(opt->defname, "index_cleanup") == 0)
{
#if PG_VERSION_NUM >= PG_VERSION_14
/* Interpret no string as the default, which is 'auto' */
if (!opt->arg)
{
params.index_cleanup = VACOPTVALUE_AUTO;
}
else
{
char *sval = defGetString(opt);
/* Try matching on 'auto' string, or fall back on boolean */
if (pg_strcasecmp(sval, "auto") == 0)
{
params.index_cleanup = VACOPTVALUE_AUTO;
}
else
{
params.index_cleanup = defGetBoolean(opt) ? VACOPTVALUE_ENABLED :
VACOPTVALUE_DISABLED;
}
}
#else
params.index_cleanup = defGetBoolean(opt) ? VACOPTVALUE_ENABLED :
VACOPTVALUE_DISABLED;
#endif
}
else if (strcmp(opt->defname, "truncate") == 0)
{
@ -606,3 +642,62 @@ VacuumStmtParams(VacuumStmt *vacstmt)
(disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0);
return params;
}
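On PostgreSQL 14+ the parsing above accepts the three-valued form, which DeparseVacuumStmtPrefix then round-trips for the shards (table name hypothetical):

```
VACUUM (INDEX_CLEANUP auto) dist_table;   -- deparsed as "INDEX_CLEANUP auto,"
VACUUM (INDEX_CLEANUP false) dist_table;  -- deparsed as "INDEX_CLEANUP false,"
```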
/*
* ExecuteUnqualifiedVacuumTasks executes tasks for unqualified vacuum commands.
*/
static void
ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams)
{
/* don't allow concurrent node list changes that require an exclusive lock */
List *workerNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock);
if (list_length(workerNodes) == 0)
{
return;
}
const char *vacuumStringPrefix = DeparseVacuumStmtPrefix(vacuumParams);
StringInfo vacuumCommand = makeStringInfo();
appendStringInfoString(vacuumCommand, vacuumStringPrefix);
List *unqualifiedVacuumCommands = list_make3(DISABLE_DDL_PROPAGATION,
vacuumCommand->data,
ENABLE_DDL_PROPAGATION);
Task *task = CitusMakeNode(Task);
task->jobId = INVALID_JOB_ID;
task->taskType = VACUUM_ANALYZE_TASK;
SetTaskQueryStringList(task, unqualifiedVacuumCommands);
task->dependentTaskList = NULL;
task->replicationModel = REPLICATION_MODEL_INVALID;
task->cannotBeExecutedInTransction = ((vacuumParams.options) & VACOPT_VACUUM);
bool hasPeerWorker = false;
int32 localNodeGroupId = GetLocalGroupId();
WorkerNode *workerNode = NULL;
foreach_ptr(workerNode, workerNodes)
{
if (workerNode->groupId != localNodeGroupId)
{
ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement);
targetPlacement->nodeName = workerNode->workerName;
targetPlacement->nodePort = workerNode->workerPort;
targetPlacement->groupId = workerNode->groupId;
task->taskPlacementList = lappend(task->taskPlacementList,
targetPlacement);
hasPeerWorker = true;
}
}
if (hasPeerWorker)
{
bool localExecution = false;
ExecuteUtilityTaskList(list_make1(task), localExecution);
}
}
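Conceptually, each peer worker receives the deparsed prefix wrapped so that it does not propagate further; assuming DISABLE_DDL_PROPAGATION / ENABLE_DDL_PROPAGATION expand to the usual SET commands, the task's query list for a bare VACUUM is:

```
SET citus.enable_ddl_propagation TO 'off';
VACUUM;
SET citus.enable_ddl_propagation TO 'on';
```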

View File

@ -377,6 +377,14 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
atttypmod);
appendStringInfoString(&buffer, attributeTypeName);
#if PG_VERSION_NUM >= PG_VERSION_14
if (CompressionMethodIsValid(attributeForm->attcompression))
{
appendStringInfo(&buffer, " COMPRESSION %s",
GetCompressionMethodName(attributeForm->attcompression));
}
#endif
/* if this column has a default value, append the default value */
if (attributeForm->atthasdef)
{
@ -448,14 +456,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults
appendStringInfoString(&buffer, " NOT NULL");
}
#if PG_VERSION_NUM >= PG_VERSION_14
if (CompressionMethodIsValid(attributeForm->attcompression))
{
appendStringInfo(&buffer, " COMPRESSION %s",
GetCompressionMethodName(attributeForm->attcompression));
}
#endif
if (attributeForm->attcollation != InvalidOid &&
attributeForm->attcollation != DEFAULT_COLLATION_OID)
{

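The move matters because PostgreSQL 14's grammar places COMPRESSION right after the type, before DEFAULT and other column constraints, so the reconstructed DDL is now valid (hypothetical table):

```
CREATE TABLE t (
    val text COMPRESSION pglz DEFAULT 'x' NOT NULL  -- COMPRESSION must precede the constraints
);
```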
View File

@ -77,6 +77,7 @@ LookupSplitMode(Oid shardSplitModeOid)
Datum enumLabelDatum = DirectFunctionCall1(enum_out, shardSplitModeOid);
char *enumLabel = DatumGetCString(enumLabelDatum);
/* Extend with other modes as we support them */
if (strncmp(enumLabel, "blocking", NAMEDATALEN) == 0)
{
shardSplitMode = BLOCKING_SPLIT;
@ -88,7 +89,8 @@ LookupSplitMode(Oid shardSplitModeOid)
/* Extend with other modes as we support them */
else
{
ereport(ERROR, (errmsg("Invalid label for enum: %s", enumLabel)));
ereport(ERROR, (errmsg("Invalid split mode: %s. Expected split mode is blocking.",
enumLabel)));
}
return shardSplitMode;

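Only the blocking label is accepted so far; a hedged sketch of invoking the split UDF with it, where the shard id, split point, node ids, and the exact parameter list are assumptions based on the 11.1-1 DDL referenced elsewhere in this diff:

```
SELECT pg_catalog.citus_split_shard_by_split_points(
    81060002,             -- shard to split
    ARRAY['1073741822'],  -- split points, as text
    ARRAY[1, 2],          -- node ids for the new placements
    'blocking');          -- split mode enum label
```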
View File

@ -40,9 +40,11 @@ static void ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
ShardInterval *shardIntervalToSplit,
List *shardSplitPointsList,
List *nodeIdsForPlacementList);
static void CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode,
List *sourceColocatedShardIntervalList,
List *shardGroupSplitIntervalListList,
static void CreateAndCopySplitShardsForShardGroup(WorkerNode *sourceShardNode,
List *sourceColocatedShardIntervalList,
List *shardGroupSplitIntervalListList,
List *workersForPlacementList);
static void CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList,
List *workersForPlacementList);
static void CreateSplitShardsForShardGroupTwo(WorkerNode *sourceShardNode,
List *sourceColocatedShardIntervalList,
@ -57,6 +59,8 @@ static void SplitShardReplicationSetup(List *sourceColocatedShardIntervalList,
WorkerNode *sourceWorkerNode,
List *workersForPlacementList);
static HTAB * CreateWorkerForPlacementSet(List *workersForPlacementList);
static void CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList,
List *workersForPlacementList);
static void CreateObjectOnPlacement(List *objectCreationCommandList,
WorkerNode *workerNode);
static List * CreateSplitIntervalsForShardGroup(List *sourceColocatedShardList,
@ -273,10 +277,13 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
{
int32 shardSplitPointValue = DatumGetInt32(shardSplitPoint);
/* All Split points should lie within the shard interval range. */
int splitPointShardIndex = FindShardIntervalIndex(shardSplitPoint,
cachedTableEntry);
if (shardIntervalToSplit->shardIndex != splitPointShardIndex)
/*
* 1) All split points should lie within the shard interval range.
* 2) Given that split points are inclusive, you cannot specify the max value of a range as a split point.
* Example: Shard 81060002 covers the range (0, 1073741823). '1073741823' as a split point is invalid;
* '1073741822' is correct and will split the shard into (0, 1073741822) and (1073741823, 1073741823).
*/
if (shardSplitPointValue < minValue || shardSplitPointValue > maxValue)
{
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@ -287,6 +294,16 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
DatumGetInt32(shardIntervalToSplit->maxValue),
shardIntervalToSplit->shardId)));
}
else if (maxValue == shardSplitPointValue)
{
int32 validSplitPoint = shardIntervalToSplit->maxValue - 1;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg(
"Invalid split point %d, as split points should be inclusive. Please use %d instead.",
maxValue,
validSplitPoint)));
}
/* Split points should be in strictly increasing order */
int32 lastShardSplitPointValue = DatumGetInt32(lastShardSplitPoint.value);
@ -302,22 +319,6 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation,
shardSplitPointValue)));
}
/*
* Given our split points inclusive, you cannot specify the max value in a range as a split point.
* Example: Shard 81060002 range is from (0,1073741823). '1073741823' as split point is invalid.
* '1073741822' is correct and will split shard to: (0, 1073741822) and (1073741823, 1073741823).
*/
if (maxValue == shardSplitPointValue)
{
int32 validSplitPoint = shardIntervalToSplit->maxValue - 1;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg(
"Invalid split point %d, as split points should be inclusive. Please use %d instead.",
maxValue,
validSplitPoint)));
}
lastShardSplitPoint = (NullableDatum) {
shardSplitPoint, false
};
@ -446,11 +447,11 @@ BlockingShardSplit(SplitOperation splitOperation,
PG_TRY();
{
/*
* Physically create split children, perform split copy and create auxillary structures.
* Physically create split children, perform split copy and create auxiliary structures.
* This includes: indexes, replicaIdentity, triggers and statistics.
* Foreign key constraints are created after Metadata changes (see CreateForeignKeyConstraints).
*/
CreateSplitShardsForShardGroup(
CreateAndCopySplitShardsForShardGroup(
sourceShardToCopyNode,
sourceColocatedShardIntervalList,
shardGroupSplitIntervalListList,
@ -491,9 +492,7 @@ BlockingShardSplit(SplitOperation splitOperation,
/* Create ShardGroup split children on a list of corresponding workers. */
static void
CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode,
List *sourceColocatedShardIntervalList,
List *shardGroupSplitIntervalListList,
CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList,
List *workersForPlacementList)
{
/* Iterate on shard interval list for shard group */
@ -519,14 +518,20 @@ CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode,
CreateObjectOnPlacement(splitShardCreationCommandList, workerPlacementNode);
}
}
}
/* Perform Split Copy */
DoSplitCopy(sourceShardNode, sourceColocatedShardIntervalList,
shardGroupSplitIntervalListList, workersForPlacementList);
/* Create ShardGroup auxiliary structures (indexes, stats, replica identities, triggers)
* on a list of corresponding workers.
*/
static void
CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList,
List *workersForPlacementList)
{
/*
* Create auxillary structures post copy.
* Create auxiliary structures post copy.
*/
List *shardIntervalList = NULL;
foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList)
{
/* Iterate on split shard interval list and corresponding placement worker */
@ -549,6 +554,28 @@ CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode,
}
/*
* Create ShardGroup split children, perform copy and create auxiliary structures
* on a list of corresponding workers.
*/
static void
CreateAndCopySplitShardsForShardGroup(WorkerNode *sourceShardNode,
List *sourceColocatedShardIntervalList,
List *shardGroupSplitIntervalListList,
List *workersForPlacementList)
{
CreateSplitShardsForShardGroup(shardGroupSplitIntervalListList,
workersForPlacementList);
DoSplitCopy(sourceShardNode, sourceColocatedShardIntervalList,
shardGroupSplitIntervalListList, workersForPlacementList);
/* Create auxiliary structures (indexes, stats, replica identities, triggers) */
CreateAuxiliaryStructuresForShardGroup(shardGroupSplitIntervalListList,
workersForPlacementList);
}
/*
* Perform Split Copy from source shard(s) to split children.
* 'sourceShardNode' : Source shard worker node.
@ -648,7 +675,7 @@ CreateSplitCopyCommand(ShardInterval *sourceShardSplitInterval,
appendStringInfo(splitCopyInfoArray, "]");
StringInfo splitCopyUdf = makeStringInfo();
appendStringInfo(splitCopyUdf, "SELECT worker_split_copy(%lu, %s);",
appendStringInfo(splitCopyUdf, "SELECT pg_catalog.worker_split_copy(%lu, %s);",
sourceShardSplitInterval->shardId,
splitCopyInfoArray->data);
@ -673,6 +700,14 @@ CreateObjectOnPlacement(List *objectCreationCommandList,
/*
* Create split children intervals for a shardgroup given list of split points.
* Example:
* 'sourceColocatedShardIntervalList': Colocated shard S1[-2147483648, 2147483647] & S2[-2147483648, 2147483647]
* 'splitPointsForShard': [0] (2 way split)
* 'shardGroupSplitIntervalListList':
* [
* [ S1_1(-2147483648, 0), S1_2(1, 2147483647) ], // Split Interval List for S1.
* [ S2_1(-2147483648, 0), S2_2(1, 2147483647) ] // Split Interval List for S2.
* ]
*/
static List *
CreateSplitIntervalsForShardGroup(List *sourceColocatedShardIntervalList,
@ -774,10 +809,10 @@ InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList,
0, /* shard length (zero for HashDistributed Table) */
workerPlacementNode->groupId);
if (ShouldSyncTableMetadata(shardInterval->relationId))
{
syncedShardList = lappend(syncedShardList, shardInterval);
}
if (ShouldSyncTableMetadata(shardInterval->relationId))
{
syncedShardList = lappend(syncedShardList, shardInterval);
}
}
}
@ -817,15 +852,20 @@ CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList,
&
referenceTableForeignConstraintList);
List *commandList = NIL;
commandList = list_concat(commandList, shardForeignConstraintCommandList);
commandList = list_concat(commandList, referenceTableForeignConstraintList);
List *constraintCommandList = NIL;
constraintCommandList = list_concat(constraintCommandList,
shardForeignConstraintCommandList);
constraintCommandList = list_concat(constraintCommandList,
referenceTableForeignConstraintList);
SendCommandListToWorkerOutsideTransaction(
workerPlacementNode->workerName,
workerPlacementNode->workerPort,
TableOwner(shardInterval->relationId),
commandList);
char *constraintCommand = NULL;
foreach_ptr(constraintCommand, constraintCommandList)
{
SendCommandToWorker(
workerPlacementNode->workerName,
workerPlacementNode->workerPort,
constraintCommand);
}
}
}
}

View File

@ -74,7 +74,6 @@ static bool CanUseLocalCopy(uint64 destinationNodeId);
static StringInfo ConstructCopyStatement(List *destinationShardFullyQualifiedName, bool
useBinaryFormat);
static void WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest);
static bool ShouldSendCopyNow(StringInfo buffer);
static int ReadFromLocalBufferCallback(void *outBuf, int minRead, int maxRead);
static void LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState
localCopyOutState);
@ -88,18 +87,6 @@ CanUseLocalCopy(uint64 destinationNodeId)
}
/*
* ShouldSendCopyNow returns true if the given buffer size exceeds the
* local copy buffer size threshold.
*/
static bool
ShouldSendCopyNow(StringInfo buffer)
{
/* LocalCopyFlushThreshold is in bytes */
return buffer->len > LocalCopyFlushThresholdByte;
}
/* Connect to node with source shard and trigger copy start. */
static void
ConnectToRemoteAndStartCopy(ShardCopyDestReceiver *copyDest)
@ -197,7 +184,7 @@ ShardCopyDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest)
if (copyDest->useLocalCopy)
{
WriteLocalTuple(slot, copyDest);
if (ShouldSendCopyNow(copyOutState->fe_msgbuf))
if (copyOutState->fe_msgbuf->len > LocalCopyFlushThresholdByte)
{
LocalCopyToShard(copyDest, copyOutState);
}

View File

@ -81,13 +81,18 @@ worker_split_copy(PG_FUNCTION_ARGS)
shardIntervalToSplitCopy,
splitCopyInfoList);
char *sourceShardToCopyName = generate_qualified_relation_name(
Oid sourceShardToCopySchemaOId = get_rel_namespace(
shardIntervalToSplitCopy->relationId);
char *sourceShardToCopySchemaName = get_namespace_name(sourceShardToCopySchemaOId);
char *sourceShardToCopyName = get_rel_name(shardIntervalToSplitCopy->relationId);
AppendShardIdToName(&sourceShardToCopyName, shardIdToSplitCopy);
char *sourceShardToCopyQualifiedName = quote_qualified_identifier(
sourceShardToCopySchemaName,
sourceShardToCopyName);
StringInfo selectShardQueryForCopy = makeStringInfo();
appendStringInfo(selectShardQueryForCopy,
"SELECT * FROM %s;", sourceShardToCopyName);
"SELECT * FROM %s;", sourceShardToCopyQualifiedName);
ParamListInfo params = NULL;
ExecuteQueryStringIntoDestReceiver(selectShardQueryForCopy->data, params,
@ -197,9 +202,9 @@ CreateShardCopyDestReceivers(EState *estate, ShardInterval *shardIntervalToSplit
char *sourceShardNamePrefix = get_rel_name(shardIntervalToSplitCopy->relationId);
foreach_ptr(splitCopyInfo, splitCopyInfoList)
{
char *destinationShardSchemaName = get_namespace_name(get_rel_namespace(
shardIntervalToSplitCopy
->relationId));
Oid destinationShardSchemaOid = get_rel_namespace(
shardIntervalToSplitCopy->relationId);
char *destinationShardSchemaName = get_namespace_name(destinationShardSchemaOid);
char *destinationShardNameCopy = pstrdup(sourceShardNamePrefix);
AppendShardIdToName(&destinationShardNameCopy, splitCopyInfo->destinationShardId);

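A hedged sketch of calling the UDF; the field layout of citus.split_copy_info (destination shard id, min/max values as text, destination node id) and the literal node ids are assumptions, as the type's definition is not shown in this diff:

```
SELECT pg_catalog.worker_split_copy(
    81060000,  -- source shard id
    ARRAY[ROW(81060015, '-2147483648', '-1', 18)::citus.split_copy_info,
          ROW(81060016, '0', '2147483647', 16)::citus.split_copy_info]);
```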
View File

@ -1462,6 +1462,17 @@ RegisterCitusConfigVariables(void)
GUC_UNIT_KB | GUC_STANDARD,
NULL, NULL, NULL);
DefineCustomIntVariable(
"citus.max_matview_size_to_auto_recreate",
gettext_noop("Sets the maximum size of materialized views in MB to "
"automatically distribute them."),
NULL,
&MaxMatViewSizeToAutoRecreate,
1024, -1, INT_MAX,
PGC_USERSET,
GUC_UNIT_MB | GUC_STANDARD,
NULL, NULL, NULL);
DefineCustomIntVariable(
"citus.max_rebalancer_logged_ignored_moves",
gettext_noop("Sets the maximum number of ignored moves the rebalance logs"),

View File

@ -0,0 +1 @@
#include "udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql"

View File

@ -7,7 +7,12 @@ DROP FUNCTION pg_catalog.worker_merge_files_into_table(bigint, integer, text[],
DROP FUNCTION pg_catalog.worker_range_partition_table(bigint, integer, text, text, oid, anyarray);
DROP FUNCTION pg_catalog.worker_repartition_cleanup(bigint);
#include "../../columnar/sql/columnar--11.0-2--11.1-1.sql"
#include "udfs/citus_split_shard_by_split_points/11.0-2.sql"
#include "udfs/worker_split_copy/11.0-2.sql"
#include "../../columnar/sql/columnar--11.0-3--11.1-1.sql"
DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8);
#include "udfs/get_all_active_transactions/11.1-1.sql"
#include "udfs/citus_split_shard_by_split_points/11.1-1.sql"
#include "udfs/worker_split_copy/11.1-1.sql"
#include "udfs/worker_split_shard_replication_setup/11.0-2.sql"

View File

@ -1,5 +1,6 @@
#include "../udfs/citus_shards_on_worker/11.0-1.sql"
#include "../udfs/citus_shard_indexes_on_worker/11.0-1.sql"
#include "../udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql"
DROP FUNCTION pg_catalog.citus_disable_node(text, integer, bool);
CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false)
@ -15,5 +16,4 @@ DROP FUNCTION pg_catalog.citus_is_coordinator();
DROP FUNCTION pg_catalog.run_command_on_coordinator(text,boolean);
DROP FUNCTION pg_catalog.start_metadata_sync_to_all_nodes();
DROP FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(boolean);
DROP PROCEDURE pg_catalog.citus_finish_citus_upgrade();

View File

@ -0,0 +1 @@
#include "../udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql"

View File

@ -46,8 +46,6 @@ CREATE FUNCTION pg_catalog.worker_repartition_cleanup(bigint)
STRICT
AS 'MODULE_PATHNAME', $function$worker_repartition_cleanup$function$;
DROP TYPE IF EXISTS citus.split_mode;
DROP TYPE IF EXISTS citus.split_copy_info;
DROP FUNCTION pg_catalog.citus_split_shard_by_split_points(
shard_id bigint,
split_points text[],
@ -56,5 +54,12 @@ DROP FUNCTION pg_catalog.citus_split_shard_by_split_points(
DROP FUNCTION pg_catalog.worker_split_copy(
source_shard_id bigint,
splitCopyInfos citus.split_copy_info[]);
DROP TYPE citus.split_mode;
DROP TYPE citus.split_copy_info;
#include "../../../columnar/sql/downgrades/columnar--11.1-1--11.0-2.sql"
#include "../../../columnar/sql/downgrades/columnar--11.1-1--11.0-3.sql"
DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8);
#include "../udfs/get_all_active_transactions/11.0-1.sql"

View File

@ -0,0 +1,224 @@
-- citus_finalize_upgrade_to_citus11() is a helper UDF that ensures
-- the upgrade to Citus 11 is finished successfully. Upgrading to
-- Citus 11 requires all active primary worker nodes to get the
-- metadata, and this function's job is to sync the metadata to the
-- nodes that do not already have it.
-- Once the function finishes without any errors and returns true,
-- the cluster is ready for running distributed queries from
-- the worker nodes. When debug is enabled, the function provides
-- more information to the user.
CREATE OR REPLACE FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(enforce_version_check bool default true)
RETURNS bool
LANGUAGE plpgsql
AS $$
BEGIN
---------------------------------------------
-- This script consists of 9 stages (0 through 8)
-- Each step is documented, and if log level
-- is reduced to DEBUG1, each step is logged
-- as well
---------------------------------------------
------------------------------------------------------------------------------------------
-- STAGE 0: Ensure no concurrent node metadata changing operation happens while this
-- script is running via acquiring a strong lock on the pg_dist_node
------------------------------------------------------------------------------------------
BEGIN
LOCK TABLE pg_dist_node IN EXCLUSIVE MODE NOWAIT;
EXCEPTION WHEN OTHERS THEN
RAISE 'Another node metadata changing operation is in progress, try again.';
END;
------------------------------------------------------------------------------------------
-- STAGE 1: We want all the commands to run in the same transaction block. Without
-- sequential mode, metadata syncing cannot be done in a transaction block along with
-- other commands
------------------------------------------------------------------------------------------
SET LOCAL citus.multi_shard_modify_mode TO 'sequential';
------------------------------------------------------------------------------------------
-- STAGE 2: Ensure we have the prerequisites
-- (a) only superuser can run this script
-- (b) cannot be executed when enable_ddl_propagation is False
-- (c) can only be executed from the coordinator
------------------------------------------------------------------------------------------
DECLARE
is_superuser_running boolean := False;
enable_ddl_prop boolean:= False;
local_group_id int := 0;
BEGIN
SELECT rolsuper INTO is_superuser_running FROM pg_roles WHERE rolname = current_user;
IF is_superuser_running IS NOT True THEN
RAISE EXCEPTION 'This operation can only be initiated by superuser';
END IF;
SELECT current_setting('citus.enable_ddl_propagation') INTO enable_ddl_prop;
IF enable_ddl_prop IS NOT True THEN
RAISE EXCEPTION 'This operation cannot be completed when citus.enable_ddl_propagation is False.';
END IF;
SELECT groupid INTO local_group_id FROM pg_dist_local_group;
IF local_group_id != 0 THEN
RAISE EXCEPTION 'Operation is not allowed on this node. Connect to the coordinator and run it again.';
ELSE
RAISE DEBUG 'We are on the coordinator, continue to sync metadata';
END IF;
END;
------------------------------------------------------------------------------------------
-- STAGE 3: Ensure all primary nodes are active
------------------------------------------------------------------------------------------
DECLARE
primary_disabled_worker_node_count int := 0;
BEGIN
SELECT count(*) INTO primary_disabled_worker_node_count FROM pg_dist_node
WHERE groupid != 0 AND noderole = 'primary' AND NOT isactive;
IF primary_disabled_worker_node_count != 0 THEN
RAISE EXCEPTION 'There are inactive primary worker nodes, you need to activate the nodes first.'
'Use SELECT citus_activate_node() to activate the disabled nodes';
ELSE
RAISE DEBUG 'There are no disabled worker nodes, continue to sync metadata';
END IF;
END;
------------------------------------------------------------------------------------------
-- STAGE 4: Ensure there is no connectivity issues in the cluster
------------------------------------------------------------------------------------------
DECLARE
all_nodes_can_connect_to_each_other boolean := False;
BEGIN
SELECT bool_and(coalesce(result, false)) INTO all_nodes_can_connect_to_each_other FROM citus_check_cluster_node_health();
IF all_nodes_can_connect_to_each_other != True THEN
RAISE EXCEPTION 'There are unhealthy primary nodes, you need to ensure all '
'nodes are up and running. Also, make sure that all nodes can connect '
'to each other. Use SELECT * FROM citus_check_cluster_node_health(); '
'to check the cluster health';
ELSE
RAISE DEBUG 'Cluster is healthy, all nodes can connect to each other';
END IF;
END;
------------------------------------------------------------------------------------------
-- STAGE 5: Ensure all nodes are on the same version
------------------------------------------------------------------------------------------
DECLARE
coordinator_version text := '';
worker_node_version text := '';
worker_node_version_count int := 0;
BEGIN
SELECT extversion INTO coordinator_version from pg_extension WHERE extname = 'citus';
-- first, check if all nodes have the same versions
SELECT
count(distinct result) INTO worker_node_version_count
FROM
run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus''');
IF enforce_version_check AND worker_node_version_count = 0 THEN
RAISE DEBUG 'There are no worker nodes';
ELSIF enforce_version_check AND worker_node_version_count != 1 THEN
RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently '
'some of the workers have different versions.';
ELSE
RAISE DEBUG 'All worker nodes have the same Citus version';
END IF;
-- second, check if the workers have the same version as the coordinator
SELECT
result INTO worker_node_version
FROM
run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'';')
GROUP BY result;
IF enforce_version_check AND coordinator_version != worker_node_version THEN
RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently '
'the coordinator has version % and the worker(s) has %',
coordinator_version, worker_node_version;
ELSE
RAISE DEBUG 'All nodes have the same Citus version';
END IF;
END;
------------------------------------------------------------------------------------------
-- STAGE 6: Ensure all the partitioned tables have the proper naming structure
-- As described on https://github.com/citusdata/citus/issues/4962
-- existing indexes on partitioned distributed tables can collide
-- with the index names that exist on the shards;
-- luckily, we know how to fix it.
-- Note that we should do this even if the cluster is a basic plan
-- (e.g., single node Citus) so that when the cluster is scaled out, everything
-- works as intended.
-- Also, this should be done only ONCE for a cluster, as it can be a pretty
-- time consuming operation. Thus, even if the function is called multiple times,
-- we keep track of it and do not re-execute this part if not needed.
------------------------------------------------------------------------------------------
DECLARE
partitioned_table_exists_pre_11 boolean:=False;
BEGIN
-- we recorded if partitioned tables exists during upgrade to Citus 11
SELECT metadata->>'partitioned_citus_table_exists_pre_11' INTO partitioned_table_exists_pre_11
FROM pg_dist_node_metadata;
IF partitioned_table_exists_pre_11 IS NOT NULL AND partitioned_table_exists_pre_11 THEN
-- this might take long depending on the number of partitions and shards...
RAISE NOTICE 'Preparing all the existing partitioned table indexes';
PERFORM pg_catalog.fix_all_partition_shard_index_names();
-- great, we are done with fixing the existing wrong index names
-- so, let's remove this
UPDATE pg_dist_node_metadata
SET metadata=jsonb_delete(metadata, 'partitioned_citus_table_exists_pre_11');
ELSE
RAISE DEBUG 'There are no partitioned tables that should be fixed';
END IF;
END;
------------------------------------------------------------------------------------------
-- STAGE 7: Return early if there are no primary worker nodes
-- We don't strictly need this step, but it gives a nicer notice message
------------------------------------------------------------------------------------------
DECLARE
primary_worker_node_count bigint :=0;
BEGIN
SELECT count(*) INTO primary_worker_node_count FROM pg_dist_node WHERE groupid != 0 AND noderole = 'primary';
IF primary_worker_node_count = 0 THEN
RAISE NOTICE 'There are no primary worker nodes, no need to sync metadata to any node';
RETURN true;
ELSE
RAISE DEBUG 'There are % primary worker nodes, continue to sync metadata', primary_worker_node_count;
END IF;
END;
------------------------------------------------------------------------------------------
-- STAGE 8: Do the actual metadata & object syncing to the worker nodes
-- For the "already synced" metadata nodes, we do not strictly need to
-- sync the objects & metadata, but there is no harm in doing it anyway;
-- it'll only cost some execution time and makes sure that we have
-- consistent metadata & objects across all the nodes
------------------------------------------------------------------------------------------
DECLARE
BEGIN
-- this might take long depending on the number of tables & objects ...
RAISE NOTICE 'Preparing to sync the metadata to all nodes';
PERFORM start_metadata_sync_to_all_nodes();
END;
RETURN true;
END;
$$;
COMMENT ON FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(bool)
IS 'finalizes upgrade to Citus';
REVOKE ALL ON FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(bool) FROM PUBLIC;
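Typical usage, run once from the coordinator after all nodes have the new packages:

```
SELECT citus_finalize_upgrade_to_citus11();
-- or, e.g. in test clusters, skip the version check:
SELECT citus_finalize_upgrade_to_citus11(enforce_version_check := false);
```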

View File

@ -120,7 +120,10 @@ END;
count(distinct result) INTO worker_node_version_count
FROM
run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus''');
IF enforce_version_check AND worker_node_version_count != 1 THEN
IF enforce_version_check AND worker_node_version_count = 0 THEN
RAISE DEBUG 'There are no worker nodes';
ELSIF enforce_version_check AND worker_node_version_count != 1 THEN
RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently '
'some of the workers have different versions.';
ELSE

View File

@ -0,0 +1,12 @@
DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions();
CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8)
RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$get_all_active_transactions$$;
COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8)
IS 'returns transaction information for all Citus initiated transactions';
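The regenerated function (now without the datname output column) is queried like any set-returning function:

```
SELECT process_id, transaction_number, global_pid
FROM get_all_active_transactions();
```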

View File

@ -6,7 +6,7 @@ RETURNS SETOF RECORD
LANGUAGE C STRICT AS 'MODULE_PATHNAME',
$$get_all_active_transactions$$;
COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT datname text, OUT process_id int, OUT initiator_node_identifier int4,
COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4,
OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz,
OUT global_pid int8)
IS 'returns transaction information for all Citus initiated transactions';

View File

@ -506,6 +506,11 @@ CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, Oid partitionRel
}
Relation parentRelation = RelationIdGetRelation(parentRelationId);
if (!RelationIsValid(parentRelation))
{
ereport(ERROR, (errmsg("could not open relation with OID %u", parentRelationId)));
}
List *parentIndexIdList = NIL;
if (parentIndexOid != InvalidOid)

View File

@ -29,6 +29,7 @@ extern bool EnableLocalReferenceForeignKeys;
extern bool EnableUnsafeTriggers;
extern int MaxMatViewSizeToAutoRecreate;
extern void SwitchToSequentialAndLocalExecutionIfRelationNameTooLong(Oid relationId,
char *
@ -553,7 +554,7 @@ extern void UpdateFunctionDistributionInfo(const ObjectAddress *distAddress,
bool *forceDelegation);
/* vacuum.c - forward declarations */
extern void PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand);
extern List * PostprocessVacuumStmt(Node *node, const char *vacuumCommand);
/* view.c - forward declarations */
extern List * PreprocessViewStmt(Node *node, const char *queryString,
@ -642,7 +643,7 @@ extern bool RelationIdListHasReferenceTable(List *relationIdList);
extern List * GetFKeyCreationCommandsForRelationIdList(List *relationIdList);
extern void DropRelationForeignKeys(Oid relationId, int flags);
extern void SetLocalEnableLocalReferenceForeignKeys(bool state);
extern void ExecuteAndLogUtilityCommandListInTableTypeConversion(
extern void ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(
List *utilityCommandList);
extern void ExecuteAndLogUtilityCommandList(List *ddlCommandList);
extern void ExecuteAndLogUtilityCommand(const char *commandString);

View File

@ -305,6 +305,7 @@ extern bool GetNodeDiskSpaceStatsForConnection(MultiConnection *connection,
uint64 *availableBytes,
uint64 *totalBytes);
extern void ExecuteQueryViaSPI(char *query, int SPIOK);
extern void ExecuteAndLogQueryViaSPI(char *query, int SPIOK, int logLevel);
extern void EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid
ownerRelationId);
extern void AlterSequenceType(Oid seqOid, Oid typeOid);

View File

@ -1,4 +1,5 @@
# ----------
# isolation setup steps
# ----------
test: isolation_setup
test: isolation_cluster_management

View File

@ -1,3 +1,5 @@
test: isolation_setup
# tests that change node metadata should precede
# isolation_cluster_management such that tests
# that come later can be parallelized

View File

@ -11,3 +11,5 @@ test: isolation_ref2ref_foreign_keys_enterprise
test: isolation_pg_send_cancellation
test: isolation_shard_move_vs_start_metadata_sync
test: isolation_tenant_isolation
test: isolation_blocking_shard_split
test: isolation_blocking_shard_split_with_fkey_to_reference

View File

@ -522,7 +522,7 @@ SELECT table_name, citus_table_type, distribution_column, shard_count, access_me
(2 rows)
SELECT c.relname, a.amname FROM pg_class c, pg_am a where c.relname SIMILAR TO 'table_type\D*' AND c.relnamespace = 'alter_table_set_access_method'::regnamespace AND c.relam = a.oid;
relname | amname
relname | amname
---------------------------------------------------------------------
table_type_citus_local | columnar
table_type_dist | columnar

View File

@ -876,7 +876,29 @@ CREATE TABLE loc_tb (a int );
CREATE VIEW v100 AS SELECT * FROM loc_tb;
CREATE VIEW v101 AS SELECT * FROM loc_tb JOIN ref_tb USING (a);
CREATE VIEW v102 AS SELECT * FROM v101;
-- a regular matview that depends on local table
CREATE MATERIALIZED VIEW matview_101 AS SELECT * from loc_tb;
-- a matview and a view that depend on the local table + each other
CREATE VIEW v103 AS SELECT * from loc_tb;
CREATE MATERIALIZED VIEW matview_102 AS SELECT * from loc_tb JOIN v103 USING (a);
CREATE OR REPLACE VIEW v103 AS SELECT * from loc_tb JOIN matview_102 USING (a);
SET client_min_messages TO DEBUG1;
-- auto undistribute
ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a);
DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v100 (a) AS SELECT loc_tb.a
FROM citus_local_tables_mx.loc_tb; ALTER VIEW citus_local_tables_mx.v100 OWNER TO postgres"
DEBUG: "view v100" has dependency to "table loc_tb" that is not in Citus' metadata
DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v101 (a) AS SELECT loc_tb.a
FROM (citus_local_tables_mx.loc_tb
JOIN citus_local_tables_mx.ref_tb USING (a)); ALTER VIEW citus_local_tables_mx.v101 OWNER TO postgres"
DEBUG: "view v101" has dependency to "table loc_tb" that is not in Citus' metadata
DEBUG: executing "CREATE MATERIALIZED VIEW citus_local_tables_mx.matview_101 USING heap AS SELECT loc_tb.a
FROM citus_local_tables_mx.loc_tb;ALTER MATERIALIZED VIEW citus_local_tables_mx.matview_101 OWNER TO postgres"
DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v102 (a) AS SELECT v101.a
FROM citus_local_tables_mx.v101; ALTER VIEW citus_local_tables_mx.v102 OWNER TO postgres"
DEBUG: "view v102" has dependency to "table loc_tb" that is not in Citus' metadata
DEBUG: validating foreign key constraint "fkey_xxxxxxx"
SET client_min_messages TO WARNING;
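The DEBUG output above shows the mechanism: once the foreign key pulls loc_tb into Citus metadata, each dependent view is recreated so that workers can resolve it. A minimal sketch (an illustration, not part of the test) of confirming the table actually landed in metadata, using the standard Citus catalog pg_dist_partition:
```
-- Sketch: confirm loc_tb is now tracked by Citus; citus local tables
-- typically appear in pg_dist_partition with partmethod 'n' (none).
SELECT logicalrelid, partmethod
FROM pg_dist_partition
WHERE logicalrelid = 'citus_local_tables_mx.loc_tb'::regclass;
```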
-- works fine
select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100, citus_local_tables_mx.v101, citus_local_tables_mx.v102$$);
run_command_on_workers
@ -908,6 +930,152 @@ select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100$
(localhost,57638,f,"ERROR: relation ""citus_local_tables_mx.v102"" does not exist")
(2 rows)
INSERT INTO loc_tb VALUES (1), (2);
-- test a matview with columnar
CREATE MATERIALIZED VIEW matview_columnar USING COLUMNAR AS SELECT * FROM loc_tb WITH DATA;
-- can't recreate matviews, because the size limit is set to zero by the GUC
SET citus.max_matview_size_to_auto_recreate TO 0;
SELECT citus_add_local_table_to_metadata('loc_tb', true);
ERROR: size of the materialized view matview_columnar exceeds citus.max_matview_size_to_auto_recreate (currently 0 MB)
-- remove the limit
SET citus.max_matview_size_to_auto_recreate TO -1;
SELECT citus_add_local_table_to_metadata('loc_tb', true);
citus_add_local_table_to_metadata
---------------------------------------------------------------------
(1 row)
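Before tuning the GUC, it can help to know how large the matviews actually are. A minimal sketch using only standard PostgreSQL catalogs (pg_matviews, pg_total_relation_size); per the error above, the GUC is interpreted in MB, with 0 disabling auto-recreation and -1 removing the limit:
```
-- Sketch: check matview sizes in MB before picking a value for the GUC.
SELECT matviewname,
       pg_total_relation_size(format('%I.%I', schemaname, matviewname)::regclass)
           / (1024 * 1024) AS size_mb
FROM pg_matviews
WHERE schemaname = 'citus_local_tables_mx';
-- e.g., allow matviews of up to 1 GB (assumed MB units) to be auto-recreated:
SET citus.max_matview_size_to_auto_recreate TO 1024;
```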
-- test REFRESH MAT VIEW
SELECT * FROM matview_101 ORDER BY a;
a
---------------------------------------------------------------------
(0 rows)
REFRESH MATERIALIZED VIEW matview_101;
SELECT * FROM matview_101 ORDER BY a;
a
---------------------------------------------------------------------
1
2
(2 rows)
-- verify columnar matview works on a table added to metadata
SELECT * FROM matview_columnar;
a
---------------------------------------------------------------------
(0 rows)
REFRESH MATERIALIZED VIEW matview_columnar;
SELECT * FROM matview_columnar ORDER BY a;
a
---------------------------------------------------------------------
1
2
(2 rows)
-- test with partitioned tables
SET citus.use_citus_managed_tables TO ON;
CREATE TABLE parent_1 (a INT UNIQUE) PARTITION BY RANGE(a);
SET citus.use_citus_managed_tables TO OFF;
CREATE MATERIALIZED VIEW part_matview1 as SELECT count(*) FROM parent_1 JOIN parent_1 p2 ON (true);
CREATE MATERIALIZED VIEW part_matview2 as SELECT count(*) FROM parent_1 JOIN part_matview1 on (true);
SELECT count(*) FROM citus_local_tables_mx.part_matview1 JOIN citus_local_tables_mx.part_matview2 ON (true);
count
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE parent_1_child_1 (a int);
CREATE TABLE parent_1_child_2 (a int);
-- create matviews on partition tables
CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM parent_1_child_1;
CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM parent_1_child_2;
CREATE MATERIALIZED VIEW mv3 AS SELECT parent_1_child_2.* FROM parent_1_child_2 JOIN parent_1_child_1 USING(a);
CREATE MATERIALIZED VIEW mv4 AS SELECT * FROM mv3;
alter table parent_1 attach partition parent_1_child_1 FOR VALUES FROM (0) TO (10) ;
-- all matviews work
SELECT count(*) FROM citus_local_tables_mx.mv1;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv2;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv3;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv4;
count
---------------------------------------------------------------------
0
(1 row)
-- recreate matviews and verify they still work
alter table parent_1 attach partition parent_1_child_2 FOR VALUES FROM (10) TO (20);
SELECT count(*) FROM citus_local_tables_mx.mv1;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv2;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv3;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv4;
count
---------------------------------------------------------------------
0
(1 row)
-- verify matviews work after undistributing
SELECT undistribute_table('parent_1');
undistribute_table
---------------------------------------------------------------------
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv1;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv2;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv3;
count
---------------------------------------------------------------------
0
(1 row)
SELECT count(*) FROM citus_local_tables_mx.mv4;
count
---------------------------------------------------------------------
0
(1 row)
-- todo: add more matview tests once 5968 and 6028 are fixed
-- cleanup at exit
set client_min_messages to error;
DROP SCHEMA citus_local_tables_mx CASCADE;

View File

@ -1,133 +1,456 @@
-- Split Shards by Split Points tests.
-- Setup for Test.
CREATE SCHEMA citus_split_shard_by_split_points;
SET search_path TO citus_split_shard_by_split_points;
SET citus.shard_count TO 1;
/*
Citus Shard Split Test. The test is modeled on 'shard_move_constraints'.
Here is a high-level overview of the test plan:
1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table.
2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors.
3. Create Foreign key constraints between the two co-located distributed tables.
4. Load data into the three tables.
5. Move one of the shards for 'sensors' to test ShardMove -> Split (sketched right after this comment).
6. Trigger Split on both shards of 'sensors'. This will also split co-located tables.
7. Move one of the split shards to test Split -> ShardMove.
8. Split an already-split shard a second time on a different schema.
*/
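As a compact illustration of steps 5 and 6 of this plan, the move-then-split sequence looks like the sketch below; the shard id, split point, and node variables are the ones this test uses further down, not new inputs:
```
-- Sketch of ShardMove -> Split (steps 5 and 6 above).
SELECT citus_move_shard_placement(8981000,
                                  'localhost', :worker_1_port,
                                  'localhost', :worker_2_port,
                                  shard_transfer_mode := 'force_logical');
SELECT pg_catalog.citus_split_shard_by_split_points(
    8981000,                                -- shard to split
    ARRAY['-1073741824'],                   -- split point(s) inside the hash range
    ARRAY[:worker_1_node, :worker_2_node],  -- target node per resulting shard
    'blocking');                            -- split mode
```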
CREATE SCHEMA "citus_split_test_schema";
SET search_path TO "citus_split_test_schema";
SET citus.next_shard_id TO 8981000;
SET citus.next_placement_id TO 8610000;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 82060000;
-- Create distributed table 'lineitem_streaming'
CREATE TABLE lineitem_streaming (
l_orderkey bigint not null,
l_partkey integer not null,
l_suppkey integer not null,
l_linenumber integer not null,
l_quantity decimal(15, 2) not null,
l_extendedprice decimal(15, 2) not null,
l_discount decimal(15, 2) not null,
l_tax decimal(15, 2) not null,
l_returnflag char(1) not null,
l_linestatus char(1) not null,
l_shipdate date not null,
l_commitdate date not null,
l_receiptdate date not null,
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
SELECT create_distributed_table('lineitem_streaming', 'l_orderkey');
-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc.
CREATE TABLE sensors(
measureid integer,
eventdatetime date,
measure_data jsonb,
meaure_quantity decimal(15, 2),
measure_status char(1),
measure_comment varchar(44),
PRIMARY KEY (measureid, eventdatetime, measure_data));
CREATE INDEX index_on_sensors ON sensors(lower(measureid::text));
ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000;
CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed'));
CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status);
CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors;
SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Create distributed table 'orders_streaming'
CREATE TABLE orders_streaming (
o_orderkey bigint not null primary key,
o_custkey integer not null,
o_orderstatus char(1) not null,
o_totalprice decimal(15,2) not null,
o_orderdate date not null,
o_orderpriority char(15) not null,
o_clerk char(15) not null,
o_shippriority integer not null,
o_comment varchar(79) not null);
SELECT create_distributed_table('orders_streaming', 'o_orderkey');
-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc.
-- BEGIN: Create co-located distributed and reference tables.
CREATE TABLE reference_table (measureid integer PRIMARY KEY);
SELECT create_reference_table('reference_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY);
CLUSTER colocated_dist_table USING colocated_dist_table_pkey;
SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- Insert data into 'lineitem_streaming'
\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|'
-- Insert data into 'orders_streaming'
\COPY orders_streaming FROM STDIN WITH DELIMITER '|'
-- Initial Row Count in Shard
SELECT COUNT(*) FROM orders_streaming;
count
CREATE TABLE table_with_index_rep_identity(key int NOT NULL);
CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key);
ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx;
CLUSTER table_with_index_rep_identity USING uqx;
SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors');
create_distributed_table
---------------------------------------------------------------------
7
(1 row)
SELECT COUNT(*) FROM lineitem_streaming;
-- END: Create co-located distributed and reference tables.
-- BEGIN : Create Foreign key constraints.
ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid);
-- END : Create Foreign key constraints.
-- BEGIN : Load data into tables.
INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i;
INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i;
SELECT COUNT(*) FROM sensors;
count
---------------------------------------------------------------------
22
1001
(1 row)
-- Create Foreign constraint between two shards.
ALTER TABLE lineitem_streaming ADD CONSTRAINT test_constraint
FOREIGN KEY(l_orderkey) REFERENCES orders_streaming(o_orderkey);
-- Before Split, List shard and placement data.
SELECT shard.shardid, logicalrelid, shardstorage, shardminvalue, shardmaxvalue nodename, nodeport, placementid
SELECT COUNT(*) FROM reference_table;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM colocated_dist_table;
count
---------------------------------------------------------------------
1001
(1 row)
-- END: Load data into tables.
-- BEGIN : Display current state.
-- TODO(niupre): Can we refactor this to be a function?
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
WHERE node.noderole = 'primary' AND (logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass)
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardstorage | shardminvalue | nodename | nodeport | placementid
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
82060000 | lineitem_streaming | t | -2147483648 | 2147483647 | 57637 | 117
82060001 | orders_streaming | t | -2147483648 | 2147483647 | 57637 | 118
8981000 | sensors | -2147483648 | -1 | localhost | 57637
8981001 | sensors | 0 | 2147483647 | localhost | 57638
8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637
8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638
8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637
8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638
(6 rows)
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981000 | fkey_table_to_dist_8981000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981003(measureid)
(1 row)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981000 | CREATE INDEX hash_index_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981000 | CREATE INDEX index_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (lower((measureid)::text))
sensors_8981000 | CREATE INDEX index_with_include_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981000 | CREATE UNIQUE INDEX sensors_pkey_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (measureid, eventdatetime, measure_data)
(4 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981005 | CREATE UNIQUE INDEX uqx_8981005 ON citus_split_test_schema.table_with_index_rep_identity_8981005 USING btree (key)
(1 row)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981000
(2 rows)
-- Trigger five-way Split on Shard.
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981001 | fkey_table_to_dist_8981001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981004(measureid)
(1 row)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981001 | CREATE INDEX hash_index_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981001 | CREATE INDEX index_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (lower((measureid)::text))
sensors_8981001 | CREATE INDEX index_with_include_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981001 | CREATE UNIQUE INDEX sensors_pkey_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (measureid, eventdatetime, measure_data)
(4 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981006 | CREATE UNIQUE INDEX uqx_8981006 ON citus_split_test_schema.table_with_index_rep_identity_8981006 USING btree (key)
(1 row)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981001
(2 rows)
-- END : Display current state
-- BEGIN : Move one shard before we split it.
\c - postgres - :master_port
SET search_path TO "citus_split_test_schema";
SET citus.next_shard_id TO 8981007;
SET citus.defer_drop_after_shard_move TO OFF;
SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical');
citus_move_shard_placement
---------------------------------------------------------------------
(1 row)
-- END : Move one shard before we split it.
-- BEGIN : Set node id variables
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
SELECT * FROM citus_split_shard_by_split_points(
82060000,
ARRAY['268435455', '536870911', '1073741823', '1610612735'],
ARRAY[:worker_1_node, :worker_1_node, :worker_2_node, :worker_2_node, :worker_2_node],
-- END : Set node id variables
-- BEGIN : Split two shards : One with move and One without move.
-- Perform 2 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981000,
ARRAY['-1073741824'],
ARRAY[:worker_1_node, :worker_2_node],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
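The split point above is simply the midpoint of the shard's hash range. A minimal sketch of how it could have been derived before the split from pg_dist_shard (whose shardminvalue/shardmaxvalue are stored as text, as the listings above show):
```
-- Sketch: compute a 2-way midpoint split point for shard 8981000 from its
-- hash range; bigint division truncates, giving '-1073741824' here.
SELECT shardid,
       shardminvalue::bigint AS min_hash,
       shardmaxvalue::bigint AS max_hash,
       ((shardminvalue::bigint + shardmaxvalue::bigint) / 2)::text AS split_point
FROM pg_dist_shard
WHERE shardid = 8981000;
```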
-- Row Count in Shard after Split (should be same as before)
SELECT COUNT(*) FROM orders_streaming;
count
-- Perform 3 way split
SELECT pg_catalog.citus_split_shard_by_split_points(
8981001,
ARRAY['536870911', '1610612735'],
ARRAY[:worker_1_node, :worker_1_node, :worker_2_node],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
7
(1 row)
SELECT COUNT(*) FROM lineitem_streaming;
count
-- END : Split two shards : One with move and One without move.
-- BEGIN : Move a shard post split.
SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes');
citus_move_shard_placement
---------------------------------------------------------------------
22
(1 row)
-- After Split, List shard and placement data.
SELECT shard.shardid, logicalrelid, shardstorage, shardminvalue, shardmaxvalue nodename, nodeport, placementid
-- END : Move a shard post split.
-- BEGIN : Display current state.
-- TODO(niupre): Can we refactor this to be a function?
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
WHERE node.noderole = 'primary' AND (logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass)
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardstorage | shardminvalue | nodename | nodeport | placementid
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
82060002 | lineitem_streaming | t | -2147483648 | 268435455 | 57637 | 119
82060003 | lineitem_streaming | t | 268435456 | 536870911 | 57637 | 120
82060004 | lineitem_streaming | t | 536870912 | 1073741823 | 57638 | 121
82060005 | lineitem_streaming | t | 1073741824 | 1610612735 | 57638 | 122
82060006 | lineitem_streaming | t | 1610612736 | 2147483647 | 57638 | 123
82060007 | orders_streaming | t | -2147483648 | 268435455 | 57637 | 124
82060008 | orders_streaming | t | 268435456 | 536870911 | 57637 | 125
82060009 | orders_streaming | t | 536870912 | 1073741823 | 57638 | 126
82060010 | orders_streaming | t | 1073741824 | 1610612735 | 57638 | 127
82060011 | orders_streaming | t | 1610612736 | 2147483647 | 57638 | 128
(10 rows)
8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(15 rows)
-- Cleanup for Test.
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA citus_split_shard_by_split_points_blocking CASCADE;
ERROR: schema "citus_split_shard_by_split_points_blocking" does not exist
\c - - - :worker_1_port
SET search_path TO "citus_split_test_schema", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981013 | fkey_table_to_dist_8981013 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981016(measureid)
sensors_8981014 | fkey_table_to_dist_8981014 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981017(measureid)
(2 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981013 | CREATE INDEX hash_index_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981013 | CREATE INDEX index_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (lower((measureid)::text))
sensors_8981013 | CREATE INDEX index_with_include_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981013 | CREATE UNIQUE INDEX sensors_pkey_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (measureid, eventdatetime, measure_data)
sensors_8981014 | CREATE INDEX hash_index_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981014 | CREATE INDEX index_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (lower((measureid)::text))
sensors_8981014 | CREATE INDEX index_with_include_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981014 | CREATE UNIQUE INDEX sensors_pkey_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (measureid, eventdatetime, measure_data)
(8 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981019 | CREATE UNIQUE INDEX uqx_8981019 ON citus_split_test_schema.table_with_index_rep_identity_8981019 USING btree (key)
table_with_index_rep_identity_8981020 | CREATE UNIQUE INDEX uqx_8981020 ON citus_split_test_schema.table_with_index_rep_identity_8981020 USING btree (key)
(2 rows)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981013
stats_on_sensors_8981014
(3 rows)
\c - - - :worker_2_port
SET search_path TO "citus_split_test_schema", public, pg_catalog;
SET citus.show_shards_for_app_name_prefixes = '*';
SELECT tbl.relname, fk."Constraint", fk."Definition"
FROM pg_catalog.pg_class tbl
JOIN public.table_fkeys fk on tbl.oid = fk.relid
WHERE tbl.relname like 'sensors_%'
ORDER BY 1, 2;
relname | Constraint | Definition
---------------------------------------------------------------------
sensors_8981007 | fkey_table_to_dist_8981007 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981009(measureid)
sensors_8981008 | fkey_table_to_dist_8981008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981010(measureid)
sensors_8981015 | fkey_table_to_dist_8981015 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981018(measureid)
(3 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
sensors_8981007 | CREATE INDEX hash_index_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981007 | CREATE INDEX index_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (lower((measureid)::text))
sensors_8981007 | CREATE INDEX index_with_include_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981007 | CREATE UNIQUE INDEX sensors_pkey_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (measureid, eventdatetime, measure_data)
sensors_8981008 | CREATE INDEX hash_index_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981008 | CREATE INDEX index_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (lower((measureid)::text))
sensors_8981008 | CREATE INDEX index_with_include_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981008 | CREATE UNIQUE INDEX sensors_pkey_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (measureid, eventdatetime, measure_data)
sensors_8981015 | CREATE INDEX hash_index_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING hash (((measure_data -> 'IsFailed'::text)))
sensors_8981015 | CREATE INDEX index_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (lower((measureid)::text))
sensors_8981015 | CREATE INDEX index_with_include_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status)
sensors_8981015 | CREATE UNIQUE INDEX sensors_pkey_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (measureid, eventdatetime, measure_data)
(12 rows)
SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2;
tablename | indexdef
---------------------------------------------------------------------
table_with_index_rep_identity_8981011 | CREATE UNIQUE INDEX uqx_8981011 ON citus_split_test_schema.table_with_index_rep_identity_8981011 USING btree (key)
table_with_index_rep_identity_8981012 | CREATE UNIQUE INDEX uqx_8981012 ON citus_split_test_schema.table_with_index_rep_identity_8981012 USING btree (key)
table_with_index_rep_identity_8981021 | CREATE UNIQUE INDEX uqx_8981021 ON citus_split_test_schema.table_with_index_rep_identity_8981021 USING btree (key)
(3 rows)
SELECT stxname FROM pg_statistic_ext
WHERE stxnamespace IN (
SELECT oid
FROM pg_namespace
WHERE nspname IN ('citus_split_test_schema')
)
ORDER BY stxname ASC;
stxname
---------------------------------------------------------------------
stats_on_sensors
stats_on_sensors_8981007
stats_on_sensors_8981008
stats_on_sensors_8981015
(4 rows)
-- END : Display current state
-- BEGIN: Should be able to change/drop constraints
\c - postgres - :master_port
SET search_path TO "citus_split_test_schema";
ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed;
ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200;
DROP STATISTICS stats_on_sensors;
DROP INDEX index_on_sensors_renamed;
ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist;
-- END: Should be able to change/drop constraints
-- BEGIN: Split second time on another schema
SET search_path TO public;
SET citus.next_shard_id TO 8981031;
SELECT pg_catalog.citus_split_shard_by_split_points(
8981007,
ARRAY['-2100000000'],
ARRAY[:worker_1_node, :worker_2_node],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
SET search_path TO "citus_split_test_schema";
SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport
FROM pg_dist_shard AS shard
INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid
WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass)
ORDER BY logicalrelid, shardminvalue::BIGINT;
shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport
---------------------------------------------------------------------
8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637
8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638
8981008 | sensors | -1073741823 | -1 | localhost | 57638
8981013 | sensors | 0 | 536870911 | localhost | 57637
8981014 | sensors | 536870912 | 1610612735 | localhost | 57637
8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638
8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637
8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638
8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638
8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637
8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637
8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638
8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637
8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638
8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638
8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637
8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637
8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638
(18 rows)
-- END: Split second time on another schema
-- BEGIN: Validate Data Count
SELECT COUNT(*) FROM sensors;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM reference_table;
count
---------------------------------------------------------------------
1001
(1 row)
SELECT COUNT(*) FROM colocated_dist_table;
count
---------------------------------------------------------------------
1001
(1 row)
-- END: Validate Data Count
-- BEGIN : Cleanup
\c - postgres - :master_port
DROP SCHEMA "citus_split_test_schema" CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table citus_split_test_schema.sensors
drop cascades to table citus_split_test_schema.reference_table
drop cascades to table citus_split_test_schema.colocated_dist_table
drop cascades to table citus_split_test_schema.table_with_index_rep_identity
-- END : Cleanup

View File

@ -38,3 +38,73 @@ ALTER INDEX idx2 ALTER COLUMN 1 SET STATISTICS 1000;
-- test reindex
REINDEX INDEX idx1;
ALTER TABLE test_tbl REPLICA IDENTITY USING INDEX a_index;
-- postgres allows ALTER INDEX RENAME on tables, and so does Citus..
-- likewise, ALTER TABLE RENAME on indexes..
CREATE TABLE alter_idx_rename_test (a INT);
CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a);
CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a);
CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a);
BEGIN;
-- rename index/table with weird syntax
ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2;
ALTER TABLE alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2;
ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2;
ALTER TABLE alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2;
-- also, rename index/table with proper syntax
ALTER INDEX alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3;
ALTER TABLE alter_idx_rename_test_2 RENAME TO alter_idx_rename_test_3;
ALTER INDEX alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3;
ALTER TABLE alter_idx_rename_test_parted_2 RENAME TO alter_idx_rename_test_parted_3;
SELECT 'alter_idx_rename_test_3'::regclass, 'alter_idx_rename_test_idx_3'::regclass;
regclass | regclass
---------------------------------------------------------------------
alter_idx_rename_test_3 | alter_idx_rename_test_idx_3
(1 row)
SELECT 'alter_idx_rename_test_parted_3'::regclass, 'alter_idx_rename_test_parted_idx_3'::regclass;
regclass | regclass
---------------------------------------------------------------------
alter_idx_rename_test_parted_3 | alter_idx_rename_test_parted_idx_3
(1 row)
ROLLBACK;
-- now, on distributed tables
SELECT create_distributed_table('alter_idx_rename_test', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT create_distributed_table('alter_idx_rename_test_parted', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
-- rename index/table with weird syntax
ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2;
ALTER TABLE alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2;
ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2;
ALTER TABLE alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2;
-- also, rename index/table with proper syntax
ALTER INDEX alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3;
ALTER TABLE alter_idx_rename_test_2 RENAME TO alter_idx_rename_test_3;
ALTER INDEX alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3;
ALTER TABLE alter_idx_rename_test_parted_2 RENAME TO alter_idx_rename_test_parted_3;
SELECT 'alter_idx_rename_test_3'::regclass, 'alter_idx_rename_test_idx_3'::regclass;
regclass | regclass
---------------------------------------------------------------------
alter_idx_rename_test_3 | alter_idx_rename_test_idx_3
(1 row)
SELECT 'alter_idx_rename_test_parted_3'::regclass, 'alter_idx_rename_test_parted_idx_3'::regclass;
regclass | regclass
---------------------------------------------------------------------
alter_idx_rename_test_parted_3 | alter_idx_rename_test_parted_idx_3
(1 row)
ALTER INDEX alter_idx_rename_test_idx_3 RENAME TO alter_idx_rename_test_idx_4;
DROP INDEX alter_idx_rename_test_idx_4;
DROP TABLE alter_idx_rename_test_3;
DROP INDEX alter_idx_rename_test_parted_idx_3;
DROP TABLE alter_idx_rename_test_parted_3;

View File

@ -51,7 +51,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -117,7 +117,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -174,7 +174,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -271,7 +271,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -336,7 +336,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -398,7 +398,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -455,7 +455,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -512,7 +512,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -569,7 +569,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -626,7 +626,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -683,7 +683,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -740,7 +740,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -797,7 +797,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -854,7 +854,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -911,7 +911,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -968,7 +968,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -1024,7 +1024,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -1081,7 +1081,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)
@ -1184,7 +1184,7 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
citus_remove_node
---------------------------------------------------------------------
(1 row)

View File

@ -73,11 +73,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -155,11 +150,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -235,11 +225,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -307,8 +292,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -72,11 +72,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -153,11 +148,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -233,11 +223,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -305,11 +290,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -377,8 +357,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -0,0 +1,951 @@
Parsed test spec with 2 sessions
starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
-- Indirect way to load cache.
TRUNCATE to_split_table;
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-update:
UPDATE to_split_table SET value = 111 WHERE id = 123456789;
<waiting ...>
step s2-commit:
COMMIT;
step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 1
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
-- Indirect way to load cache.
TRUNCATE to_split_table;
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-delete:
DELETE FROM to_split_table WHERE id = 123456789;
<waiting ...>
step s2-commit:
COMMIT;
step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 1
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
-- Indirect way to load cache.
TRUNCATE to_split_table;
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
<waiting ...>
step s2-commit:
COMMIT;
step s1-insert: <... completed>
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
-- Indirect way to load cache.
TRUNCATE to_split_table;
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-copy:
COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
<waiting ...>
step s2-commit:
COMMIT;
step s1-copy: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-update:
UPDATE to_split_table SET value = 111 WHERE id = 123456789;
<waiting ...>
step s2-commit:
COMMIT;
step s1-update: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 1
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
starting permutation: s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
1
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-delete:
DELETE FROM to_split_table WHERE id = 123456789;
<waiting ...>
step s2-commit:
COMMIT;
step s1-delete: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 1
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
	-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
<waiting ...>
step s2-commit:
COMMIT;
step s1-insert: <... completed>
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
	-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-copy:
COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
<waiting ...>
step s2-commit:
COMMIT;
step s1-copy: <... completed>
ERROR: could not find valid entry for shard xxxxx
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s1-load-cache s1-insert s1-begin s1-blocking-shard-split s2-blocking-shard-split s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
-- Indirect way to load cache.
TRUNCATE to_split_table;
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
	-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500001,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
<waiting ...>
step s1-commit:
COMMIT;
step s2-blocking-shard-split: <... completed>
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500003|t | 0
57637|1500005|t | 1
57638|1500004|t | 0
57638|1500006|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
starting permutation: s1-insert s1-begin s1-blocking-shard-split s2-blocking-shard-split s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-insert:
-- Id '123456789' maps to shard xxxxx.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
get_shard_id_for_distribution_column
---------------------------------------------------------------------
1500002
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
	-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500001,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
<waiting ...>
step s1-commit:
COMMIT;
step s2-blocking-shard-split: <... completed>
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500003|t | 0
57637|1500005|t | 1
57638|1500004|t | 0
57638|1500006|t | 0
(4 rows)
id|value
---------------------------------------------------------------------
123456789| 1
(1 row)
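The two permutations above show that a second blocking split queues behind an in-progress split on the same table even though it targets a different shard (1500002 vs 1500001), so the operation evidently locks more than the single shard being split. One way to observe the wait from a third session, using only a standard PostgreSQL view:

    -- shows the blocked split and what it is waiting on
    SELECT pid, wait_event_type, wait_event, state
    FROM pg_stat_activity
    WHERE query LIKE '%citus_split_shard_by_split_points%';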
starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-load-cache:
-- Indirect way to load cache.
TRUNCATE to_split_table;
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
	-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-ddl:
CREATE INDEX test_table_index ON to_split_table(id);
<waiting ...>
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport|success|result
---------------------------------------------------------------------
57637|t | 1
57637|t | 1
57638|t | 1
(3 rows)
starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-begin:
BEGIN;
-- the tests are written with the logic where single shard SELECTs
	-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
step s1-select:
SELECT count(*) FROM to_split_table WHERE id = 123456789;
count
---------------------------------------------------------------------
0
(1 row)
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-ddl:
CREATE INDEX test_table_index ON to_split_table(id);
<waiting ...>
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500001|t | 0
57637|1500003|t | 0
57638|1500004|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
step s2-print-index-count:
SELECT
nodeport, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
nodeport|success|result
---------------------------------------------------------------------
57637|t | 1
57637|t | 1
57638|t | 1
(3 rows)
View File
@ -0,0 +1,301 @@
Parsed test spec with 2 sessions
starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-add-fkey:
ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-delete:
DELETE FROM reference_table WHERE id = 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-delete: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('table_to_split', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM table_to_split ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500004|t | 0
57638|1500003|t | 0
57638|1500005|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
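This file exercises splits against a table with a foreign key into a reference table; s1's DELETE on reference_table blocks until the split commits, presumably because the split must recreate the foreign key on the new shards and therefore locks the referenced table. The assumed setup behind these permutations, sketched from the step definitions visible above (the primary key on reference_table is an assumption, since the foreign key needs a unique referenced column):

    CREATE TABLE reference_table (id int PRIMARY KEY, value int);
    SELECT create_reference_table('reference_table');
    CREATE TABLE table_to_split (id int, value int);
    SELECT create_distributed_table('table_to_split', 'id');
    ALTER TABLE table_to_split
        ADD CONSTRAINT fkey_const FOREIGN KEY (value) REFERENCES reference_table(id);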
starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-add-fkey:
ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-update:
UPDATE reference_table SET value = 5 WHERE id = 5;
<waiting ...>
step s2-commit:
COMMIT;
step s1-update: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('table_to_split', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM table_to_split ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500004|t | 0
57638|1500003|t | 0
57638|1500005|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-add-fkey:
ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-insert:
INSERT INTO reference_table VALUES (5, 10);
<waiting ...>
step s2-commit:
COMMIT;
step s1-insert: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('table_to_split', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM table_to_split ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500004|t | 0
57638|1500003|t | 0
57638|1500005|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-add-fkey:
ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-copy:
COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
<waiting ...>
step s2-commit:
COMMIT;
step s1-copy: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('table_to_split', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM table_to_split ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500004|t | 0
57638|1500003|t | 0
57638|1500005|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-add-fkey:
ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s2-blocking-shard-split:
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
citus_split_shard_by_split_points
---------------------------------------------------------------------
(1 row)
step s1-ddl:
CREATE INDEX reference_table_index ON reference_table(id);
<waiting ...>
step s2-commit:
COMMIT;
step s1-ddl: <... completed>
step s1-commit:
COMMIT;
step s2-print-cluster:
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('table_to_split', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM table_to_split ORDER BY id, value;
nodeport|shardid|success|result
---------------------------------------------------------------------
57637|1500004|t | 0
57638|1500003|t | 0
57638|1500005|t | 0
(3 rows)
id|value
---------------------------------------------------------------------
(0 rows)
View File
@ -19,11 +19,6 @@ step s1-commit:
COMMIT;
step s2-update: <... completed>
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s2-update
create_distributed_table
@ -37,11 +32,6 @@ step s1-insert:
step s2-update:
UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-multi-insert s2-update s1-commit
create_distributed_table
@ -62,11 +52,6 @@ step s1-commit:
COMMIT;
step s2-update: <... completed>
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-multi-insert s2-multi-insert-overlap s1-commit
create_distributed_table
@ -86,11 +71,6 @@ step s2-multi-insert-overlap:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s1-multi-insert s2-multi-insert s1-commit s2-commit
create_distributed_table
@ -116,8 +96,3 @@ step s1-commit:
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -89,11 +89,6 @@ count
11
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -150,11 +145,6 @@ step s3-select-count:
SELECT COUNT(*) FROM copy_table;
ERROR: relation "copy_table" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -245,8 +235,3 @@ count
8
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -22,11 +22,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-ddl-create-index-concurrently s1-commit s1-show-indexes
create_distributed_table
@ -48,11 +43,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-add-column s1-commit s2-commit s1-show-indexes s1-show-columns
create_distributed_table
@ -82,11 +72,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-rename-column s1-commit s2-commit s1-show-indexes s1-show-columns
create_distributed_table
@ -116,11 +101,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes
create_distributed_table
@ -150,11 +130,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes
create_distributed_table
@ -182,11 +157,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-add-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -209,11 +179,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -236,11 +201,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes
create_distributed_table
@ -270,11 +230,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes
create_distributed_table
@ -302,11 +257,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-add-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -329,11 +279,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -357,11 +302,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-table-size s1-commit s2-commit s1-show-indexes
create_distributed_table
@ -388,11 +328,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-master-modify-multiple-shards s1-commit s2-commit s1-show-indexes
create_distributed_table
@ -415,11 +350,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-create-index s2-distribute-table s1-commit s2-commit s1-show-indexes
create_distributed_table
@ -449,11 +379,6 @@ run_command_on_workers
(localhost,57638,t,4)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-table-size s1-commit s2-commit s1-show-columns
create_distributed_table
@ -481,11 +406,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns
create_distributed_table
@ -508,11 +428,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-add-column s2-distribute-table s1-commit s2-commit s1-show-columns
create_distributed_table
@ -542,11 +457,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-table-size s1-commit s2-commit s1-show-columns
create_distributed_table
@ -574,11 +484,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns
create_distributed_table
@ -601,11 +506,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-distribute-table s1-commit s2-commit s1-show-columns
create_distributed_table
@ -635,11 +535,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-create-index s1-commit s2-commit s1-show-indexes
create_distributed_table
@ -666,11 +561,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-create-index s1-commit s2-commit s1-show-indexes
create_distributed_table
@ -693,11 +583,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-create-index s1-commit s2-commit s1-show-indexes
create_distributed_table
@ -727,11 +612,6 @@ run_command_on_workers
(localhost,57638,t,4)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-table-size s2-ddl-create-index-concurrently s1-commit s1-show-indexes
create_distributed_table
@ -756,11 +636,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-ddl-create-index-concurrently s1-commit s1-show-indexes
create_distributed_table
@ -781,11 +656,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-ddl-create-index-concurrently s1-commit s1-show-indexes
create_distributed_table
@ -813,11 +683,6 @@ run_command_on_workers
(localhost,57638,t,4)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-add-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -844,11 +709,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-add-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -871,11 +731,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-add-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -905,11 +760,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-rename-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -936,11 +786,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-rename-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -963,11 +808,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-rename-column s1-commit s2-commit s1-show-columns
create_distributed_table
@ -997,8 +837,3 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -20,11 +20,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table
@ -46,11 +41,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
@ -68,11 +58,6 @@ step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
ERROR: relation "delete_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -101,11 +86,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-delete s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -135,11 +115,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-delete s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -166,11 +141,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -199,11 +169,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-delete s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -233,11 +198,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -266,11 +226,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-table-size s1-commit s2-commit s1-select-count
create_distributed_table
@ -296,11 +251,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-delete s2-distribute-table s1-commit s2-commit s1-select-count
create_distributed_table
@ -329,11 +279,6 @@ count
8
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-delete s1-commit s2-commit s1-select-count
create_distributed_table
@ -355,11 +300,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-delete s1-commit s2-commit s1-select-count
create_distributed_table
@ -378,11 +318,6 @@ ERROR: relation "delete_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM delete_hash;
ERROR: relation "delete_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -411,11 +346,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -445,11 +375,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -478,11 +403,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -512,11 +432,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -545,11 +460,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-delete s1-commit s2-commit s1-select-count
create_distributed_table
@ -575,11 +485,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-delete s1-commit s2-commit s1-select-count
create_distributed_table
@ -608,8 +513,3 @@ count
8
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -99,11 +99,6 @@ id|value
2| 2
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
@ -200,11 +195,6 @@ id|value
2| 2
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
@ -306,11 +296,6 @@ id|value
2| 2
(4 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
@ -410,11 +395,6 @@ id|value
2| 2
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
@ -515,11 +495,6 @@ id|value
2| 2
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
@ -617,11 +592,6 @@ id|value
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select-for-udpate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display
step s1-start-session-level-connection:
@ -719,8 +689,3 @@ id|value
2| 2
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
View File

@ -85,11 +85,6 @@ count
6
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-index s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit s2-commit-worker s2-stop-connection
step s1-begin:
@ -141,11 +136,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -229,11 +219,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-flaky-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection
step s1-start-session-level-connection:
@ -279,8 +264,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -18,11 +18,6 @@ ERROR: table "drop_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop-schema s2-drop-schema s1-commit s2-commit s1-select-count
create_distributed_table
@ -42,11 +37,6 @@ ERROR: schema "drop_tests" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop-schema s2-drop-schema-2 s1-commit s2-commit s1-select-count
create_distributed_table
@ -64,11 +54,6 @@ step s1-commit: COMMIT;
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -95,11 +80,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -127,11 +107,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -156,11 +131,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -187,11 +157,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -219,11 +184,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -250,11 +210,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count
create_distributed_table
@ -274,11 +229,6 @@ ERROR: could not compute table size: relation does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count
create_distributed_table
@ -300,11 +250,6 @@ ERROR: could not create distributed table: relation does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -330,11 +275,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -361,11 +301,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -391,11 +326,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -422,11 +352,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -452,11 +377,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
@ -479,11 +399,6 @@ step s1-commit: COMMIT;
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s2-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
@ -509,8 +424,3 @@ step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM drop_hash;
ERROR: relation "drop_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
View File

@ -56,11 +56,6 @@ step s2-update: <... completed>
step s2-abort:
ABORT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
step s1-begin:
@ -137,8 +132,3 @@ step s3-update: <... completed>
step s3-abort:
ABORT;
restore_isolation_tester_func
---------------------------------------------------------------------
View File

@ -17,11 +17,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
create_distributed_table
@ -45,11 +40,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
create_distributed_table
@ -77,11 +67,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count
create_distributed_table
@ -93,8 +78,8 @@ step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 &&
step s1-begin: BEGIN;
step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-adaptive-select:
SET citus.enable_repartition_joins TO ON;
SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
id|data|int_data|id|data|int_data
---------------------------------------------------------------------
@ -112,11 +97,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
create_distributed_table
@ -135,11 +115,6 @@ count
11
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
create_distributed_table
@ -158,11 +133,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
create_distributed_table
@ -181,11 +151,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
create_distributed_table
@ -204,11 +169,6 @@ count
9
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
create_distributed_table
@ -228,11 +188,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
create_distributed_table
@ -248,11 +203,6 @@ step s1-commit: COMMIT;
step s2-drop: <... completed>
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
ERROR: relation "hash_copy" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -279,11 +229,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -311,11 +256,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-flaky-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -341,11 +281,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -372,11 +307,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -404,11 +334,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -435,11 +360,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
@ -463,11 +383,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table
@ -486,11 +401,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
create_distributed_table
@ -515,11 +425,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
create_distributed_table
@ -546,11 +451,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
create_distributed_table
@ -559,10 +459,10 @@ create_distributed_table
(1 row)
step s1-recreate-with-replication-2:
DROP TABLE hash_copy;
SET citus.shard_replication_factor TO 2;
CREATE TABLE hash_copy(id integer, data text, int_data int);
SELECT create_distributed_table('hash_copy', 'id');
create_distributed_table
---------------------------------------------------------------------
@ -581,11 +481,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
create_distributed_table
@ -594,10 +489,10 @@ create_distributed_table
(1 row)
step s1-recreate-with-replication-2:
DROP TABLE hash_copy;
SET citus.shard_replication_factor TO 2;
CREATE TABLE hash_copy(id integer, data text, int_data int);
SELECT create_distributed_table('hash_copy', 'id');
DROP TABLE hash_copy;
SET citus.shard_replication_factor TO 2;
CREATE TABLE hash_copy(id integer, data text, int_data int);
SELECT create_distributed_table('hash_copy', 'id');
create_distributed_table
---------------------------------------------------------------------
@ -616,11 +511,6 @@ count
9
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
create_distributed_table
@ -629,10 +519,10 @@ create_distributed_table
(1 row)
step s1-recreate-with-replication-2:
DROP TABLE hash_copy;
SET citus.shard_replication_factor TO 2;
CREATE TABLE hash_copy(id integer, data text, int_data int);
SELECT create_distributed_table('hash_copy', 'id');
DROP TABLE hash_copy;
SET citus.shard_replication_factor TO 2;
CREATE TABLE hash_copy(id integer, data text, int_data int);
SELECT create_distributed_table('hash_copy', 'id');
create_distributed_table
---------------------------------------------------------------------
@ -651,11 +541,6 @@ count
20
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table
@ -664,10 +549,10 @@ create_distributed_table
(1 row)
step s1-recreate-with-replication-2:
DROP TABLE hash_copy;
SET citus.shard_replication_factor TO 2;
CREATE TABLE hash_copy(id integer, data text, int_data int);
SELECT create_distributed_table('hash_copy', 'id');
DROP TABLE hash_copy;
SET citus.shard_replication_factor TO 2;
CREATE TABLE hash_copy(id integer, data text, int_data int);
SELECT create_distributed_table('hash_copy', 'id');
create_distributed_table
---------------------------------------------------------------------
@ -686,11 +571,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
create_distributed_table
@ -714,11 +594,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
create_distributed_table
@ -746,11 +621,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count
create_distributed_table
@ -761,8 +631,8 @@ create_distributed_table
step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-adaptive-select:
SET citus.enable_repartition_joins TO ON;
SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
SET citus.enable_repartition_joins TO ON;
SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
id|data|int_data|id|data|int_data
---------------------------------------------------------------------
@ -781,11 +651,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
create_distributed_table
@ -804,11 +669,6 @@ count
11
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
create_distributed_table
@ -827,11 +687,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
create_distributed_table
@ -850,11 +705,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
create_distributed_table
@ -873,11 +723,6 @@ count
9
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
create_distributed_table
@ -897,11 +742,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
create_distributed_table
@ -918,11 +758,6 @@ step s2-copy: <... completed>
ERROR: relation "hash_copy" does not exist
step s1-select-count: SELECT COUNT(*) FROM hash_copy;
ERROR: relation "hash_copy" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -949,11 +784,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -981,11 +811,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1013,11 +838,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1045,11 +865,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
@ -1076,11 +891,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table
@ -1104,11 +914,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count
create_distributed_table
@ -1127,11 +932,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
create_distributed_table
@ -1157,11 +957,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_distributed_table
@ -1188,8 +983,3 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -7,21 +7,21 @@ create_distributed_table
(1 row)
step s1-begin:
SET citus.shard_replication_factor to 1;
BEGIN;
SET citus.shard_replication_factor to 1;
BEGIN;
step s1-insert-into-select-conflict-update:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
col_1|col_2
---------------------------------------------------------------------
@ -33,22 +33,17 @@ col_1|col_2
(5 rows)
step s2-begin:
BEGIN;
BEGIN;
step s2-update:
UPDATE target_table SET col_2 = 5;
UPDATE target_table SET col_2 = 5;
<waiting ...>
step s1-commit:
COMMIT;
COMMIT;
step s2-update: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-delete s1-commit s2-commit
@ -58,39 +53,34 @@ create_distributed_table
(1 row)
step s1-begin:
SET citus.shard_replication_factor to 1;
BEGIN;
SET citus.shard_replication_factor to 1;
BEGIN;
step s1-insert-into-select-conflict-do-nothing:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT DO NOTHING;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT DO NOTHING;
step s2-begin:
BEGIN;
BEGIN;
step s2-delete:
DELETE FROM target_table;
DELETE FROM target_table;
<waiting ...>
step s1-commit:
COMMIT;
COMMIT;
step s2-delete: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit
@ -100,40 +90,40 @@ create_distributed_table
(1 row)
step s1-begin:
SET citus.shard_replication_factor to 1;
BEGIN;
SET citus.shard_replication_factor to 1;
BEGIN;
step s1-insert-into-select-conflict-do-nothing:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT DO NOTHING;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT DO NOTHING;
step s2-begin:
BEGIN;
BEGIN;
step s2-insert-into-select-conflict-update:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
<waiting ...>
step s1-commit:
COMMIT;
COMMIT;
step s2-insert-into-select-conflict-update: <... completed>
col_1|col_2
@ -146,12 +136,7 @@ col_1|col_2
(5 rows)
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit
@ -161,21 +146,21 @@ create_distributed_table
(1 row)
step s1-begin:
SET citus.shard_replication_factor to 1;
BEGIN;
SET citus.shard_replication_factor to 1;
BEGIN;
step s1-insert-into-select-conflict-update:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
col_1|col_2
---------------------------------------------------------------------
@ -187,23 +172,23 @@ col_1|col_2
(5 rows)
step s2-begin:
BEGIN;
BEGIN;
step s2-insert-into-select-conflict-update:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
<waiting ...>
step s1-commit:
COMMIT;
COMMIT;
step s2-insert-into-select-conflict-update: <... completed>
col_1|col_2
@ -216,12 +201,7 @@ col_1|col_2
(5 rows)
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-do-nothing s1-commit s2-commit
@ -231,21 +211,21 @@ create_distributed_table
(1 row)
step s1-begin:
SET citus.shard_replication_factor to 1;
BEGIN;
SET citus.shard_replication_factor to 1;
BEGIN;
step s1-insert-into-select-conflict-update:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
col_1|col_2
---------------------------------------------------------------------
@ -257,32 +237,27 @@ col_1|col_2
(5 rows)
step s2-begin:
BEGIN;
BEGIN;
step s2-insert-into-select-conflict-do-nothing:
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT DO NOTHING;
INSERT INTO target_table
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT DO NOTHING;
<waiting ...>
step s1-commit:
COMMIT;
COMMIT;
step s2-insert-into-select-conflict-do-nothing: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
starting permutation: s1-begin-replication-factor-2 s1-insert-into-select-conflict-update-replication-factor-2 s2-begin-replication-factor-2 s2-insert-into-select-conflict-update-replication-factor-2 s1-commit s2-commit
@ -292,21 +267,21 @@ create_distributed_table
(1 row)
step s1-begin-replication-factor-2:
SET citus.shard_replication_factor to 2;
BEGIN;
SET citus.shard_replication_factor to 2;
BEGIN;
step s1-insert-into-select-conflict-update-replication-factor-2:
INSERT INTO target_table_2
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
INSERT INTO target_table_2
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
col_1|col_2|col_3
---------------------------------------------------------------------
@ -318,24 +293,24 @@ col_1|col_2|col_3
(5 rows)
step s2-begin-replication-factor-2:
SET citus.shard_replication_factor to 2;
BEGIN;
SET citus.shard_replication_factor to 2;
BEGIN;
step s2-insert-into-select-conflict-update-replication-factor-2:
INSERT INTO target_table_2
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
INSERT INTO target_table_2
SELECT
col_1, col_2
FROM (
SELECT
col_1, col_2, col_3
FROM
source_table
LIMIT 5
) as foo
ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *;
<waiting ...>
step s1-commit:
COMMIT;
COMMIT;
step s2-insert-into-select-conflict-update-replication-factor-2: <... completed>
col_1|col_2|col_3
@ -348,10 +323,5 @@ col_1|col_2|col_3
(5 rows)
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
View File
@ -89,11 +89,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -184,11 +179,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -279,11 +269,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -374,11 +359,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -469,11 +449,6 @@ count
12
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -564,11 +539,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -659,11 +629,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -754,11 +719,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -849,11 +809,6 @@ count
13
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -944,11 +899,6 @@ count
13
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -1013,11 +963,6 @@ step s3-select-count:
SELECT COUNT(*) FROM dist_table;
ERROR: relation "dist_table" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -1082,11 +1027,6 @@ step s3-select-count:
SELECT COUNT(*) FROM dist_table;
ERROR: relation "dist_table" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -1177,11 +1117,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -1272,8 +1207,3 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -89,11 +89,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -184,11 +179,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -279,11 +269,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -374,11 +359,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -469,11 +449,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -564,11 +539,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -659,11 +629,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-update-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -754,11 +719,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -849,11 +809,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -945,11 +900,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -1040,8 +990,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -89,11 +89,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-update s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -187,11 +182,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-delete s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -283,11 +273,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -372,11 +357,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-start-session-level-connection s2-begin-on-worker s2-insert s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-begin:
@ -451,11 +431,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -533,11 +508,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -613,11 +583,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -685,8 +650,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -88,11 +88,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-update s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -185,11 +180,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-delete s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -281,11 +271,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -370,11 +355,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -459,11 +439,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-start-session-level-connection s2-begin-on-worker s2-insert s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-begin:
@ -537,11 +512,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -618,11 +588,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -698,11 +663,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -770,11 +730,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection
step s1-insert:
@ -843,8 +798,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -117,8 +117,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -5,7 +5,7 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s2-modify_with_subquery_v1:
UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id;
@ -14,76 +14,61 @@ step s1-insert_to_events_test_table:
INSERT INTO events_test_table VALUES(4,6,8,10);
<waiting ...>
step s2-commit:
COMMIT;
COMMIT;
step s1-insert_to_events_test_table: <... completed>
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-update_events_test_table s2-commit s1-commit
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s2-modify_with_subquery_v1:
UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id;
step s1-update_events_test_table:
UPDATE users_test_table SET value_1 = 3;
UPDATE users_test_table SET value_1 = 3;
<waiting ...>
step s2-commit:
COMMIT;
COMMIT;
step s1-update_events_test_table: <... completed>
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-delete_events_test_table s2-commit s1-commit
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s2-modify_with_subquery_v1:
UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id;
step s1-delete_events_test_table:
DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3;
DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3;
<waiting ...>
step s2-commit:
COMMIT;
COMMIT;
step s1-delete_events_test_table: <... completed>
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s1-insert_to_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s1-insert_to_events_test_table:
INSERT INTO events_test_table VALUES(4,6,8,10);
@ -96,12 +81,7 @@ step s1-commit:
step s2-modify_with_subquery_v1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
starting permutation: s1-begin s2-begin s1-update_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit
@ -109,10 +89,10 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s1-update_events_test_table:
UPDATE users_test_table SET value_1 = 3;
UPDATE users_test_table SET value_1 = 3;
step s2-modify_with_subquery_v1:
UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id;
@ -122,12 +102,7 @@ step s1-commit:
step s2-modify_with_subquery_v1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
starting permutation: s1-begin s2-begin s1-delete_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit
@ -135,10 +110,10 @@ step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
BEGIN;
step s1-delete_events_test_table:
DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3;
DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3;
step s2-modify_with_subquery_v1:
UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id;
@ -148,10 +123,5 @@ step s1-commit:
step s2-modify_with_subquery_v1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
COMMIT;
View File
@ -38,11 +38,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-delete-table-1 s1-view-locks s2-rollback s1-view-locks
step s2-begin:
@ -76,11 +71,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-update-table-2 s1-view-locks s2-rollback s1-view-locks
step s2-begin:
@ -114,11 +104,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-delete-table-2 s1-view-locks s2-rollback s1-view-locks
step s2-begin:
@ -152,11 +137,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-update-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks
step s2-begin:
@ -196,11 +176,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-delete-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks
step s2-begin:
@ -240,11 +215,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-insert-table-1 s1-view-locks s2-rollback s1-view-locks
step s2-begin:
@ -278,11 +248,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-insert-table-2 s1-view-locks s2-rollback s1-view-locks
step s2-begin:
@ -316,11 +281,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-begin s2-insert-table-3 s1-view-locks s2-rollback s1-view-locks
step s2-begin:
@ -354,11 +314,6 @@ mode|count
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-update-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2
step s1-begin:
@ -389,11 +344,6 @@ id|value
5| 5
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-update-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2
step s1-begin:
@ -426,11 +376,6 @@ id|value
7| 2
(4 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-update-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2
step s1-begin:
@ -462,11 +407,6 @@ id|value
5| 5
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2
step s1-begin:
@ -497,11 +437,6 @@ id|value
5| 5
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2
step s1-begin:
@ -533,11 +468,6 @@ id|value
5| 5
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2
step s1-begin:
@ -568,11 +498,6 @@ id|value
5| 5
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-3 s2-commit s1-commit s1-select-table-3
step s1-begin:
@ -607,11 +532,6 @@ id|value
5| 5
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-3 s2-commit s1-commit s1-select-table-3
step s1-begin:
@ -643,11 +563,6 @@ id|value
5| 5
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3
step s1-begin:
@ -682,11 +597,6 @@ id|value
5| 5
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3
step s1-begin:
@ -723,11 +633,6 @@ id|value
5| 5
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-1 s1-commit s2-commit s1-select-table-3
step s1-begin:
@ -764,11 +669,6 @@ id|value
5| 5
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-3
step s1-begin:
@ -800,11 +700,6 @@ id|value
5| 5
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s1-update-table-2 s2-insert-table-1 s1-commit s2-commit s1-select-table-3
step s1-begin:
@ -836,11 +731,6 @@ id|value
5| 5
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-insert-table-2 s1-update-table-3 s2-commit s1-commit s1-select-table-3
step s1-begin:
@ -877,11 +767,6 @@ id|value
5| 5
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-2 s1-commit s2-commit s1-select-table-3
step s1-begin:
@ -918,11 +803,6 @@ id|value
5| 5
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-1 s2-commit s1-commit
step s1-begin:
@ -950,11 +830,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-2 s2-commit s1-commit
step s1-begin:
@ -982,11 +857,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-3 s2-commit s1-commit
step s1-begin:
@ -1014,11 +884,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-1 s2-commit s1-commit
step s1-begin:
@ -1046,11 +911,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-2 s2-commit s1-commit
step s1-begin:
@ -1078,11 +938,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-3 s2-commit s1-commit
step s1-begin:
@ -1110,11 +965,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-1 s2-commit s1-commit
step s1-begin:
@ -1142,11 +992,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-2 s2-commit s1-commit
step s1-begin:
@ -1174,11 +1019,6 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-3 s2-commit s1-commit
step s1-begin:
@ -1206,8 +1046,3 @@ step s2-commit:
step s1-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -87,11 +87,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -180,11 +175,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -273,11 +263,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -366,11 +351,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -459,11 +439,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -552,11 +527,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -645,11 +615,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -738,11 +703,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection
step s2-start-session-level-connection:
@ -831,8 +791,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -87,11 +87,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
create_reference_table
@ -188,11 +183,6 @@ count
2
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
create_reference_table
@ -280,11 +270,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
create_reference_table
@ -373,11 +358,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
create_reference_table
@ -474,11 +454,6 @@ count
2
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
create_reference_table
@ -562,11 +537,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
create_reference_table
@ -655,11 +625,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection
create_reference_table
@ -710,8 +675,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -97,11 +97,6 @@ count
2
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
create_reference_table
@ -198,11 +193,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-drop s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
create_reference_table
@ -293,11 +283,6 @@ step s3-select-count:
SELECT COUNT(*) FROM ref_table;
ERROR: relation "ref_table" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
create_reference_table
@ -394,8 +379,3 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
View File
@ -17,11 +17,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
create_reference_table
@ -45,11 +40,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
create_reference_table
@ -77,11 +67,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count
create_reference_table
@ -111,11 +96,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
create_reference_table
@ -134,11 +114,6 @@ count
11
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
create_reference_table
@ -158,11 +133,6 @@ count
20
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
create_reference_table
@ -182,11 +152,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
create_reference_table
@ -206,11 +171,6 @@ count
9
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
create_reference_table
@ -230,11 +190,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
create_reference_table
@ -250,11 +205,6 @@ step s1-commit: COMMIT;
step s2-drop: <... completed>
step s1-select-count: SELECT COUNT(*) FROM reference_copy;
ERROR: relation "reference_copy" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -281,11 +231,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -313,11 +258,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-flaky-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -343,11 +283,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_reference_table
@ -374,11 +309,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_reference_table
@ -406,11 +336,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_reference_table
@ -437,11 +362,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_reference_table
@ -465,11 +385,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
create_reference_table
@ -489,11 +404,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
create_reference_table
@ -520,11 +430,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
create_reference_table
@ -548,11 +453,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
create_reference_table
@ -580,11 +480,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count
create_reference_table
@ -614,11 +509,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
create_reference_table
@ -637,11 +527,6 @@ count
11
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
create_reference_table
@ -661,11 +546,6 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
create_reference_table
@ -685,11 +565,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
create_reference_table
@ -709,11 +584,6 @@ count
9
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
create_reference_table
@ -733,11 +603,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
create_reference_table
@ -754,11 +619,6 @@ step s2-copy: <... completed>
ERROR: relation "reference_copy" does not exist
step s1-select-count: SELECT COUNT(*) FROM reference_copy;
ERROR: relation "reference_copy" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -785,11 +645,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_reference_table
@ -817,11 +672,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_reference_table
@ -849,11 +699,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_reference_table
@ -881,11 +726,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_reference_table
@ -912,11 +752,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_reference_table
@ -940,11 +775,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count
create_reference_table
@ -964,11 +794,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_reference_table
@ -995,8 +820,3 @@ count
15
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -82,11 +82,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -170,11 +165,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -258,11 +248,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -345,11 +330,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -433,11 +413,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -520,11 +495,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -607,11 +577,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -695,11 +660,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection
step s2-start-session-level-connection:
@ -752,11 +712,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-select-from-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection
step s2-start-session-level-connection:
@ -809,8 +764,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -22,8 +22,3 @@ ERROR: duplicate key value violates unique constraint "test_locking_a_key_14000
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

File diff suppressed because it is too large

View File

@ -29,11 +29,6 @@ step s2-update-t1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-delete-t1 s1-commit s2-commit
step s1-begin:
@ -64,11 +59,6 @@ step s2-delete-t1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-update-t1 s1-commit s2-commit
step s1-begin:
@ -99,11 +89,6 @@ step s2-update-t1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-rt s1-commit s2-commit
step s1-begin:
@ -135,11 +120,6 @@ step s2-update-rt: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-t1 s1-commit s2-commit
step s1-begin:
@ -170,11 +150,6 @@ step s1-commit:
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-select-from-t1-t2-for-share s1-commit s2-commit
step s1-begin:
@ -213,11 +188,6 @@ step s1-commit:
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit
step s1-begin:
@ -257,11 +227,6 @@ id|val_1|id|val_1
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit
step s1-begin:
@ -298,11 +263,6 @@ id|val_1|id|val_1
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-update-t1 s1-commit s2-commit
step s1-begin:
@ -330,11 +290,6 @@ step s2-update-t1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-with-subquery s2-begin s2-update-t1 s1-commit s2-commit
step s1-begin:
@ -365,11 +320,6 @@ step s2-update-t1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-rt-with-subquery s2-begin s2-update-rt s1-commit s2-commit
step s1-begin:
@ -396,11 +346,6 @@ step s2-update-rt: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-select-from-t1-with-view s2-begin s2-update-t1 s1-commit s2-commit
step s1-begin:
@ -427,11 +372,6 @@ step s2-update-t1: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-update-rt-with-cte-select-from-rt s2-begin s2-update-rt s1-commit s2-commit
step s1-begin:
@ -454,8 +394,3 @@ step s2-update-rt: <... completed>
step s2-commit:
COMMIT;
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

File diff suppressed because it is too large

View File

@ -81,11 +81,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -176,11 +171,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-delete s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -271,11 +261,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -366,11 +351,6 @@ count
7
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-begin s2-index s1-commit-worker s2-commit s1-stop-connection
step s1-start-session-level-connection:
@ -422,11 +402,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
step s1-start-session-level-connection:
@ -509,11 +484,6 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-disable-binary-protocol-on-worker s1-select s2-flaky-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection
step s1-start-session-level-connection:
@ -568,8 +538,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -0,0 +1,19 @@
Parsed test spec with 1 sessions
starting permutation: setup
step setup:
-- Replace pg_isolation_test_session_is_blocked so that all isolation tests are run with the Citus implementation.
--
-- Vanilla PG only checks for local blocks, whereas the Citus implementation also checks worker jobs in distributed
-- transactions.
--
-- We have some tests that do not produce deterministic outputs when we use the Citus UDFs. They restore this
-- function in the setup phase and replace it again in the teardown phase so that the remainder of the tests can
-- keep using the Citus alternatives. Those tests should never be run concurrently with other isolation tests.
SELECT citus_internal.replace_isolation_tester_func();
replace_isolation_tester_func
---------------------------------------------------------------------
(1 row)
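The comments above describe the escape hatch for the handful of non-deterministic specs: such a spec temporarily restores the vanilla blocking check in its setup and re-installs the Citus one in its teardown. A minimal sketch of that setup/teardown pair, assuming the `citus_internal.restore_isolation_tester_func()` counterpart UDF whose result headers appear throughout the outputs above:

```sql
-- Hypothetical spec fragment for a test whose output is non-deterministic
-- under the Citus blocking checks.

-- setup: fall back to vanilla pg_isolation_test_session_is_blocked
SELECT citus_internal.restore_isolation_tester_func();

-- ... permutations run against the vanilla implementation ...

-- teardown: re-install the Citus implementation so later specs keep using it
SELECT citus_internal.replace_isolation_tester_func();
```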

View File

@ -144,8 +144,3 @@ table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname
---------------------------------------------------------------------
(0 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -20,11 +20,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -46,11 +41,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -68,11 +58,6 @@ step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM truncate_append;
ERROR: relation "truncate_append" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
master_create_empty_shard
@ -101,11 +86,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
master_create_empty_shard
@ -135,11 +115,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
master_create_empty_shard
@ -166,11 +141,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
master_create_empty_shard
@ -199,11 +169,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
master_create_empty_shard
@ -233,11 +198,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
master_create_empty_shard
@ -266,11 +226,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -297,11 +252,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -323,11 +273,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -354,11 +299,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-truncate s2-distribute-table s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -386,11 +326,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -412,11 +347,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -435,11 +365,6 @@ ERROR: relation "truncate_append" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM truncate_append;
ERROR: relation "truncate_append" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes
master_create_empty_shard
@ -468,11 +393,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes
master_create_empty_shard
@ -502,11 +422,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
master_create_empty_shard
@ -535,11 +450,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
master_create_empty_shard
@ -569,11 +479,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
master_create_empty_shard
@ -602,11 +507,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -632,11 +532,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -658,11 +553,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -689,11 +579,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count
master_create_empty_shard
@ -721,8 +606,3 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -90,11 +90,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -186,11 +181,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -282,11 +272,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -378,11 +363,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -474,11 +454,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-alter s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit s2-commit-worker s2-stop-connection s3-select-count
step s1-begin:
@ -539,11 +514,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
step s1-start-session-level-connection:
@ -635,11 +605,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-truncate s3-select-count-from-ref-table s1-commit-worker s1-stop-connection
step s1-start-session-level-connection:
@ -691,8 +656,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -20,11 +20,6 @@ step s1-commit:
step s2-undistribute: <... completed>
ERROR: cannot complete operation because no such table exists
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-select s1-commit
step s1-begin:
@ -52,11 +47,6 @@ a|b
5|6
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-insert s1-commit s2-select
step s1-begin:
@ -89,11 +79,6 @@ a| b
9|10
(5 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-insert-select s1-commit s2-select
step s1-begin:
@ -127,11 +112,6 @@ a|b
5|6
(6 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-delete s1-commit s2-select
step s1-begin:
@ -161,11 +141,6 @@ a|b
5|6
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-copy s1-commit s2-select
step s1-begin:
@ -198,11 +173,6 @@ step s2-select:
13|14
(5 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-drop s1-commit s2-select
step s1-begin:
@ -227,11 +197,6 @@ step s2-select:
SELECT * FROM dist_table ORDER BY 1, 2;
ERROR: relation "dist_table" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-truncate s1-commit s2-select
step s1-begin:
@ -264,11 +229,6 @@ a|b
5|6
(3 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-select-for-update s1-commit
step s1-begin:
@ -294,11 +254,6 @@ a|b
5|6
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-begin s1-undistribute s2-create-index-concurrently s1-commit
step s1-begin:
@ -319,8 +274,3 @@ step s1-commit:
COMMIT;
step s2-create-index-concurrently: <... completed>
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -94,11 +94,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
create_distributed_table
@ -194,11 +189,6 @@ count
3
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-alter-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count
create_distributed_table
@ -290,11 +280,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection
create_distributed_table
@ -382,8 +367,3 @@ stop_session_level_connection_to_node
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -8,7 +8,7 @@ nodeid|nodename |nodeport
(2 rows)
step s1-begin:
BEGIN;
step s1-update-node-1:
SELECT 1 FROM master_update_node(
@ -28,7 +28,7 @@ step s2-update-node-2:
58638);
<waiting ...>
step s1-commit:
COMMIT;
step s2-update-node-2: <... completed>
?column?
@ -98,7 +98,7 @@ nodeid|nodename |nodeport
(2 rows)
step s1-begin:
BEGIN;
step s1-update-node-1:
SELECT 1 FROM master_update_node(
@ -112,7 +112,7 @@ step s1-update-node-1:
(1 row)
step s2-begin:
BEGIN;
step s2-update-node-1:
SELECT 1 FROM master_update_node(
@ -121,7 +121,7 @@ step s2-update-node-1:
58637);
<waiting ...>
step s1-commit:
COMMIT;
step s2-update-node-1: <... completed>
?column?
@ -130,7 +130,7 @@ step s2-update-node-1: <... completed>
(1 row)
step s2-abort:
ABORT;
step s1-show-nodes:
SELECT nodeid, nodename, nodeport, isactive
@ -192,7 +192,7 @@ create_distributed_table
(1 row)
step s1-begin:
BEGIN;
step s1-update-node-nonexistent:
SELECT 1 FROM master_update_node(

View File

@ -20,11 +20,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-update-cte s1-commit s2-commit s1-select-count
create_distributed_table
@ -56,11 +51,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-delete s1-commit s2-commit s1-select-count
create_distributed_table
@ -82,11 +72,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table
@ -108,11 +93,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
@ -130,11 +110,6 @@ step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM update_hash;
ERROR: relation "update_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -163,11 +138,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-update s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -197,11 +167,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-update s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -228,11 +193,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -261,11 +221,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-update s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -295,11 +250,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -328,11 +278,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-table-size s1-commit s2-commit s1-select-count
create_distributed_table
@ -358,11 +303,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count
create_distributed_table
@ -384,11 +324,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-update s2-distribute-table s1-commit s2-commit s1-select-count
create_distributed_table
@ -417,11 +352,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-delete s1-commit s2-commit s1-select-count
create_distributed_table
@ -448,11 +378,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table
@ -479,11 +404,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
@ -506,11 +426,6 @@ step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM update_hash;
ERROR: relation "update_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -544,11 +459,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-update-cte s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -583,11 +493,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-update-cte s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -619,11 +524,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -657,11 +557,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-update-cte s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -696,11 +591,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -734,11 +624,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-table-size s1-commit s2-commit s1-select-count
create_distributed_table
@ -769,11 +654,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count
create_distributed_table
@ -800,11 +680,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-update s1-commit s2-commit s1-select-count
create_distributed_table
@ -826,11 +701,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-update s1-commit s2-commit s1-select-count
create_distributed_table
@ -852,11 +722,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-update s1-commit s2-commit s1-select-count
create_distributed_table
@ -875,11 +740,6 @@ ERROR: relation "update_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM update_hash;
ERROR: relation "update_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -908,11 +768,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -942,11 +797,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-update s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -975,11 +825,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-update s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -1009,11 +854,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-update s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -1043,11 +883,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-update s1-commit s2-commit s1-select-count
create_distributed_table
@ -1073,11 +908,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-update s1-commit s2-commit s1-select-count
create_distributed_table
@ -1099,11 +929,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-update s1-commit s2-commit s1-select-count
create_distributed_table
@ -1132,11 +957,6 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-update-cte s1-commit s2-commit s1-select-count
create_distributed_table
@ -1162,11 +982,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-update-cte s1-commit s2-commit s1-select-count
create_distributed_table
@ -1192,11 +1007,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-update-cte s1-commit s2-commit s1-select-count
create_distributed_table
@ -1215,11 +1025,6 @@ ERROR: relation "update_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM update_hash;
ERROR: relation "update_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-update-cte s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -1253,11 +1058,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-update-cte s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -1292,11 +1092,6 @@ run_command_on_workers
(localhost,57638,t,0)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-update-cte s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -1330,11 +1125,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-update-cte s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -1369,11 +1159,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-update-cte s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -1403,11 +1188,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-update-cte s1-commit s2-commit s1-select-count
create_distributed_table
@ -1438,11 +1218,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-update-cte s1-commit s2-commit s1-select-count
create_distributed_table
@ -1468,11 +1243,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-update-cte s1-commit s2-commit s1-select-count
create_distributed_table
@ -1507,8 +1277,3 @@ count
10
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -20,11 +20,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-update s1-commit s2-commit s1-select-count
create_distributed_table
@ -46,11 +41,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-delete s1-commit s2-commit s1-select-count
create_distributed_table
@ -72,11 +62,6 @@ count
4
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table
@ -98,11 +83,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-drop s1-commit s2-commit s1-select-count
create_distributed_table
@ -120,11 +100,6 @@ step s2-drop: <... completed>
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM upsert_hash;
ERROR: relation "upsert_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -153,11 +128,6 @@ run_command_on_workers
(localhost,57638,t,4)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-upsert s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -187,11 +157,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s1-upsert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
@ -218,11 +183,6 @@ run_command_on_workers
(localhost,57638,t,4)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -251,11 +211,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-upsert s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -285,11 +240,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -318,11 +268,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-table-size s1-commit s2-commit s1-select-count
create_distributed_table
@ -348,11 +293,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count
create_distributed_table
@ -374,11 +314,6 @@ count
0
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-upsert s2-distribute-table s1-commit s2-commit s1-select-count
create_distributed_table
@ -407,11 +342,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-update s2-upsert s1-commit s2-commit s1-select-count
create_distributed_table
@ -433,11 +363,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-upsert s1-commit s2-commit s1-select-count
create_distributed_table
@ -459,11 +384,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-upsert s1-commit s2-commit s1-select-count
create_distributed_table
@ -485,11 +405,6 @@ count
1
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-upsert s1-commit s2-commit s1-select-count
create_distributed_table
@ -508,11 +423,6 @@ ERROR: relation "upsert_hash" does not exist
step s2-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM upsert_hash;
ERROR: relation "upsert_hash" does not exist
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -541,11 +451,6 @@ run_command_on_workers
(localhost,57638,t,4)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes
create_distributed_table
@ -575,11 +480,6 @@ run_command_on_workers
(localhost,57638,t,2)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -608,11 +508,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -642,11 +537,6 @@ run_command_on_workers
(localhost,57638,t,"")
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table
@ -676,11 +566,6 @@ run_command_on_workers
(localhost,57638,t,new_column)
(2 rows)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-upsert s1-commit s2-commit s1-select-count
create_distributed_table
@ -706,11 +591,6 @@ count
5
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-upsert s1-commit s2-commit s1-select-count
create_distributed_table
@ -732,8 +612,3 @@ count
1
(1 row)
restore_isolation_tester_func
---------------------------------------------------------------------
(1 row)

View File

@ -0,0 +1,171 @@
Parsed test spec with 2 sessions
starting permutation: lock_share vac_specified commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
s2: WARNING: skipping vacuum of "part1" --- lock not available
step vac_specified: VACUUM (SKIP_LOCKED) part1, part2;
step commit:
COMMIT;
starting permutation: lock_share vac_all_parts commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
step vac_all_parts: VACUUM (SKIP_LOCKED) parted;
step commit:
COMMIT;
starting permutation: lock_share analyze_specified commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
s2: WARNING: skipping analyze of "part1" --- lock not available
step analyze_specified: ANALYZE (SKIP_LOCKED) part1, part2;
step commit:
COMMIT;
starting permutation: lock_share analyze_all_parts commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
step analyze_all_parts: ANALYZE (SKIP_LOCKED) parted;
step commit:
COMMIT;
starting permutation: lock_share vac_analyze_specified commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
s2: WARNING: skipping vacuum of "part1" --- lock not available
step vac_analyze_specified: VACUUM (ANALYZE, SKIP_LOCKED) part1, part2;
step commit:
COMMIT;
starting permutation: lock_share vac_analyze_all_parts commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
step vac_analyze_all_parts: VACUUM (ANALYZE, SKIP_LOCKED) parted;
step commit:
COMMIT;
starting permutation: lock_share vac_full_specified commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
s2: WARNING: skipping vacuum of "part1" --- lock not available
step vac_full_specified: VACUUM (SKIP_LOCKED, FULL) part1, part2;
step commit:
COMMIT;
starting permutation: lock_share vac_full_all_parts commit
step lock_share:
BEGIN;
LOCK part1 IN SHARE MODE;
step vac_full_all_parts: VACUUM (SKIP_LOCKED, FULL) parted;
step commit:
COMMIT;
starting permutation: lock_access_exclusive vac_specified commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
s2: WARNING: skipping vacuum of "part1" --- lock not available
step vac_specified: VACUUM (SKIP_LOCKED) part1, part2;
step commit:
COMMIT;
starting permutation: lock_access_exclusive vac_all_parts commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
step vac_all_parts: VACUUM (SKIP_LOCKED) parted;
step commit:
COMMIT;
starting permutation: lock_access_exclusive analyze_specified commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
s2: WARNING: skipping analyze of "part1" --- lock not available
step analyze_specified: ANALYZE (SKIP_LOCKED) part1, part2;
step commit:
COMMIT;
starting permutation: lock_access_exclusive analyze_all_parts commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
step analyze_all_parts: ANALYZE (SKIP_LOCKED) parted; <waiting ...>
step commit:
COMMIT;
step analyze_all_parts: <... completed>
starting permutation: lock_access_exclusive vac_analyze_specified commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
s2: WARNING: skipping vacuum of "part1" --- lock not available
step vac_analyze_specified: VACUUM (ANALYZE, SKIP_LOCKED) part1, part2;
step commit:
COMMIT;
starting permutation: lock_access_exclusive vac_analyze_all_parts commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
step vac_analyze_all_parts: VACUUM (ANALYZE, SKIP_LOCKED) parted; <waiting ...>
step commit:
COMMIT;
step vac_analyze_all_parts: <... completed>
starting permutation: lock_access_exclusive vac_full_specified commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
s2: WARNING: skipping vacuum of "part1" --- lock not available
step vac_full_specified: VACUUM (SKIP_LOCKED, FULL) part1, part2;
step commit:
COMMIT;
starting permutation: lock_access_exclusive vac_full_all_parts commit
step lock_access_exclusive:
BEGIN;
LOCK part1 IN ACCESS EXCLUSIVE MODE;
step vac_full_all_parts: VACUUM (SKIP_LOCKED, FULL) parted;
step commit:
COMMIT;
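All of the permutations above reduce to one rule: with SKIP_LOCKED, VACUUM and ANALYZE warn and skip any relation whose lock cannot be taken immediately, while runs against the partitioned parent `parted` quietly skip (or, for ANALYZE under ACCESS EXCLUSIVE, wait on) the locked partition. A minimal two-session sketch of the pattern, assuming the `part1`/`part2` partitions from this spec:

```sql
-- session 1: hold a lock that conflicts with VACUUM's SHARE UPDATE EXCLUSIVE lock
BEGIN;
LOCK part1 IN SHARE MODE;

-- session 2: SKIP_LOCKED warns and moves on instead of blocking
VACUUM (SKIP_LOCKED) part1, part2;
-- WARNING:  skipping vacuum of "part1" --- lock not available

-- session 1: release the lock
COMMIT;
```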

View File

@ -426,20 +426,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE
ALTER EXTENSION citus UPDATE TO '9.4-2';
-- should see the old source code
SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
prosrc
prosrc
---------------------------------------------------------------------
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
(1 row)
@ -467,20 +467,20 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '9.4-1';
-- should see the old source code
SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
prosrc
prosrc
---------------------------------------------------------------------
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
(1 row)
@ -563,20 +563,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE
ALTER EXTENSION citus UPDATE TO '9.5-2';
-- should see the old source code
SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
prosrc
prosrc
---------------------------------------------------------------------
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
(1 row)
@ -604,20 +604,20 @@ SELECT * FROM multi_extension.print_extension_changes();
ALTER EXTENSION citus UPDATE TO '9.5-1';
-- should see the old source code
SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1;
prosrc
prosrc
---------------------------------------------------------------------
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
+
DECLARE +
colocated_tables regclass[]; +
BEGIN +
SELECT get_colocated_table_array(relation) INTO colocated_tables;+
PERFORM +
master_update_shard_statistics(shardid) +
FROM +
pg_dist_shard +
WHERE +
logicalrelid = ANY (colocated_tables); +
END; +
(1 row)
@ -1031,6 +1031,15 @@ SELECT * FROM multi_extension.print_extension_changes();
| view citus_stat_activity
(41 rows)
-- Test downgrade to 11.0-1 from 11.0-2
ALTER EXTENSION citus UPDATE TO '11.0-2';
ALTER EXTENSION citus UPDATE TO '11.0-1';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
-- Snapshot of state at 11.0-2
ALTER EXTENSION citus UPDATE TO '11.0-2';
SELECT * FROM multi_extension.print_extension_changes();
@ -1042,9 +1051,26 @@ SELECT * FROM multi_extension.print_extension_changes();
| function start_metadata_sync_to_all_nodes() boolean
(4 rows)
-- Test downgrade script (result should be empty)
ALTER EXTENSION citus UPDATE TO '11.0-1';
-- Test downgrade to 11.0-2 from 11.0-3
ALTER EXTENSION citus UPDATE TO '11.0-3';
ALTER EXTENSION citus UPDATE TO '11.0-2';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
-- Snapshot of state at 11.0-3
ALTER EXTENSION citus UPDATE TO '11.0-3';
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
-- Test downgrade to 11.0-3 from 11.1-1
ALTER EXTENSION citus UPDATE TO '11.1-1';
ALTER EXTENSION citus UPDATE TO '11.0-3';
-- Should be empty result since upgrade+downgrade should be a no-op
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
@ -1069,28 +1095,24 @@ SELECT * FROM multi_extension.print_extension_changes();
table columnar.chunk_group |
table columnar.options |
table columnar.stripe |
| function citus_split_shard_by_split_points(bigint,text[],integer[],citus.split_mode) void
| function columnar.get_storage_id(regclass) bigint
| function columnar_internal.columnar_handler(internal) table_am_handler
| function worker_split_copy(bigint,citus.split_copy_info[]) void
| schema columnar_internal
| sequence columnar_internal.storageid_seq
| table columnar_internal.chunk
| table columnar_internal.chunk_group
| table columnar_internal.options
| table columnar_internal.stripe
| type citus.split_copy_info
| type citus.split_mode
| view columnar.chunk
| view columnar.chunk_group
| view columnar.options
| view columnar.storage
| view columnar.stripe
(27 rows)
-- Test downgrade script (result should be empty)
ALTER EXTENSION citus UPDATE TO '11.0-2';
ALTER EXTENSION citus UPDATE TO '11.1-1';
SELECT * FROM multi_extension.print_extension_changes();
previous_object | current_object
---------------------------------------------------------------------
(0 rows)
(31 rows)
DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff;
-- show running version
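Every downgrade test above follows the same no-op check: upgrade, downgrade, and assert that `multi_extension.print_extension_changes()` returns an empty set. A condensed sketch of the pattern, taken from the 11.0-2/11.0-3 pair:

```sql
-- upgrade and immediately downgrade; the pair must be a catalog no-op
ALTER EXTENSION citus UPDATE TO '11.0-3';
ALTER EXTENSION citus UPDATE TO '11.0-2';
-- an empty result means no objects leaked in either direction
SELECT * FROM multi_extension.print_extension_changes();
```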

View File

@ -75,7 +75,7 @@ SELECT citus_table_size('customer_copy_hash'),
citus_table_size('supplier');
citus_table_size | citus_table_size | citus_table_size
---------------------------------------------------------------------
548864 | 548864 | 425984
548864 | 548864 | 442368
(1 row)
CREATE INDEX index_1 on customer_copy_hash(c_custkey);

View File

@ -246,10 +246,6 @@ WHERE tablename = 'dustbunnies_990002' ORDER BY attname;
\c - - :master_host :master_port
SET citus.log_remote_commands TO ON;
-- verify warning for unqualified VACUUM
VACUUM;
WARNING: not propagating VACUUM command to worker nodes
HINT: Provide a specific table in order to VACUUM distributed tables.
-- check for multiple table vacuum
VACUUM dustbunnies, second_dustbunnies;
NOTICE: issuing VACUUM public.dustbunnies_990002
@ -260,14 +256,10 @@ NOTICE: issuing VACUUM public.second_dustbunnies_990003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.second_dustbunnies_990003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- and warning when using targeted VACUUM without DDL propagation
-- and do not propagate when using targeted VACUUM without DDL propagation
SET citus.enable_ddl_propagation to false;
VACUUM dustbunnies;
WARNING: not propagating VACUUM command to worker nodes
HINT: Set citus.enable_ddl_propagation to true in order to send targeted VACUUM commands to worker nodes.
ANALYZE dustbunnies;
WARNING: not propagating ANALYZE command to worker nodes
HINT: Set citus.enable_ddl_propagation to true in order to send targeted ANALYZE commands to worker nodes.
SET citus.enable_ddl_propagation to DEFAULT;
-- test worker_hash
SELECT worker_hash(123);
@ -314,3 +306,269 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
1
(1 row)
SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 970000;
SET citus.log_remote_commands TO OFF;
CREATE TABLE local_vacuum_table(id int primary key, b text);
CREATE TABLE reference_vacuum_table(id int);
SELECT create_reference_table('reference_vacuum_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE distributed_vacuum_table(id int);
SELECT create_distributed_table('distributed_vacuum_table', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET citus.log_remote_commands TO ON;
-- should propagate to all workers because no table is specified
VACUUM;
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because no distributed table is specified
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
VACUUM local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
21 MB
(1 row)
-- vacuum full deallocates pages of dead tuples, whereas plain vacuum only marks their space as reusable
VACUUM FULL local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
16 kB
(1 row)
-- should propagate to all workers because table is reference table
VACUUM reference_vacuum_table;
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table
VACUUM distributed_vacuum_table;
NOTICE: issuing VACUUM public.distributed_vacuum_table_970001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only distributed_vacuum_table and reference_vacuum_table should propagate
VACUUM distributed_vacuum_table, local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM public.distributed_vacuum_table_970001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_vacuum_table should propagate
VACUUM local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (disable_page_skipping) aggressively processes all pages of the relation; it does not consult the visibility map
VACUUM (DISABLE_PAGE_SKIPPING true) local_vacuum_table;
VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table;
-- vacuum (index_cleanup on, parallel 1) should execute index vacuuming and index cleanup phases in parallel
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
56 MB
(1 row)
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table;
SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') );
pg_size_pretty
---------------------------------------------------------------------
21 MB
(1 row)
-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true)
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
vacuum (TRUNCATE false) local_vacuum_table;
SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset
insert into local_vacuum_table select i from generate_series(1,1000000) i;
delete from local_vacuum_table;
vacuum (TRUNCATE true) local_vacuum_table;
SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset
SELECT :size1 > :size2 as truncate_less_size;
truncate_less_size
---------------------------------------------------------------------
t
(1 row)
-- vacuum (analyze) should analyze the table to generate statistics after vacuuming
select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table';
analyze_count
---------------------------------------------------------------------
0
0
(2 rows)
vacuum (analyze) local_vacuum_table, reference_vacuum_table;
NOTICE: issuing VACUUM (ANALYZE) public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE) public.reference_vacuum_table_970000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- give enough time for stats to be updated (they are flushed every 500ms by default)
select pg_sleep(1);
pg_sleep
---------------------------------------------------------------------
(1 row)
select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table';
analyze_count
---------------------------------------------------------------------
1
1
(2 rows)
-- should not propagate because ddl propagation is disabled
SET citus.enable_ddl_propagation TO OFF;
VACUUM distributed_vacuum_table;
SET citus.enable_ddl_propagation TO ON;
SET citus.log_remote_commands TO OFF;
-- ANALYZE tests
CREATE TABLE local_analyze_table(id int);
CREATE TABLE reference_analyze_table(id int);
SELECT create_reference_table('reference_analyze_table');
create_reference_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE distributed_analyze_table(id int);
SELECT create_distributed_table('distributed_analyze_table', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE loc (a INT, b INT);
CREATE TABLE dist (a INT);
SELECT create_distributed_table ('dist', 'a');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SET citus.log_remote_commands TO ON;
-- should propagate to all workers because no table is specified
ANALYZE;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing SET citus.enable_ddl_propagation TO 'on'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because no distributed table is specified
ANALYZE local_analyze_table;
-- should propagate to all workers because table is reference table
ANALYZE reference_analyze_table;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should propagate to all workers because table is distributed table
ANALYZE distributed_analyze_table;
NOTICE: issuing ANALYZE public.distributed_analyze_table_970003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only distributed_analyze_table and reference_analyze_table should propagate
ANALYZE distributed_analyze_table, local_analyze_table, reference_analyze_table;
NOTICE: issuing ANALYZE public.distributed_analyze_table_970003
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- only reference_analyze_table should propagate
ANALYZE local_analyze_table, reference_analyze_table;
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx');
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing ANALYZE public.reference_analyze_table_970002
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx'
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- should not propagate because ddl propagation is disabled
SET citus.enable_ddl_propagation TO OFF;
ANALYZE distributed_analyze_table;
SET citus.enable_ddl_propagation TO ON;
-- analyze only specified columns for corresponding tables
ANALYZE loc(b), dist(a);
NOTICE: issuing ANALYZE public.dist_970004 (a)
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
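Summarizing the expected output above: unqualified VACUUM and ANALYZE now propagate to every worker, statements naming only local tables stay on the coordinator, and mixed lists forward only the distributed and reference tables. A quick way to reproduce the observation, reusing the tables created in this test:

```sql
SET citus.log_remote_commands TO ON;

VACUUM;                                              -- sent to every worker
VACUUM local_vacuum_table;                           -- stays local, nothing is issued
VACUUM local_vacuum_table, reference_vacuum_table;   -- only the reference table is forwarded

SET citus.log_remote_commands TO OFF;
```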

View File

@ -10,7 +10,7 @@ set search_path to pg14;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 980000;
SET citus.shard_count TO 2;
-- test the new vacuum option, process_toast
-- test the new vacuum option process_toast, and also the auto option for index_cleanup
CREATE TABLE t1 (a int);
SELECT create_distributed_table('t1','a');
create_distributed_table
@ -41,6 +41,69 @@ NOTICE: issuing VACUUM pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP AUTO) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP AuTo) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP false) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP false) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (INDEX_CLEANUP false) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP true) t1;
NOTICE: issuing VACUUM (INDEX_CLEANUP true) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (INDEX_CLEANUP true) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (INDEX_CLEANUP "AUTOX") t1;
ERROR: index_cleanup requires a Boolean value
VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1;
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1;
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001
DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-- vacuum (process_toast true) should be vacuuming toast tables (default is true)
CREATE TABLE local_vacuum_table(name text);
select reltoastrelid from pg_class where relname='local_vacuum_table'
\gset
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table;
SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
frozen_performed
---------------------------------------------------------------------
t
(1 row)
-- vacuum (process_toast false) should not be vacuuming toast tables (default is true)
SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass
\gset
VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table;
SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class
WHERE oid=:reltoastrelid::regclass;
frozen_not_performed
---------------------------------------------------------------------
t
(1 row)
DROP TABLE local_vacuum_table;
SET citus.log_remote_commands TO OFF;
create table dist(a int, b int);
select create_distributed_table('dist','a');
@ -1376,6 +1439,35 @@ SELECT create_distributed_table('ctlt1', 'a');
(1 row)
CREATE TABLE ctlt_all_2 (LIKE ctlt1 INCLUDING ALL);
CREATE TABLE compression_and_defaults (
data text COMPRESSION lz4 DEFAULT '"{}"'::text COLLATE "C" NOT NULL PRIMARY KEY,
rev text
)
WITH (
autovacuum_vacuum_scale_factor='0.01',
fillfactor='75'
);
SELECT create_distributed_table('compression_and_defaults', 'data', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
CREATE TABLE compression_and_generated_col (
data text COMPRESSION lz4 GENERATED ALWAYS AS (rev || '{]') STORED COLLATE "C" NOT NULL,
rev text
)
WITH (
autovacuum_vacuum_scale_factor='0.01',
fillfactor='75'
);
SELECT create_distributed_table('compression_and_generated_col', 'rev', colocate_with:='none');
create_distributed_table
---------------------------------------------------------------------
(1 row)
DROP TABLE compression_and_defaults, compression_and_generated_col;
-- cleanup
set client_min_messages to error;
drop extension postgres_fdw cascade;
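The final hunk checks that column-level storage options survive `create_distributed_table`: a distribution column carrying `COMPRESSION`, `COLLATE`, `NOT NULL`, and `PRIMARY KEY` no longer errors out. A minimal sketch of the shape being exercised (hypothetical table name; assumes PostgreSQL was built with lz4 support):

```sql
CREATE TABLE compressed_dist (
    data text COMPRESSION lz4 COLLATE "C" NOT NULL PRIMARY KEY,  -- all options on the distribution column
    rev  text
);
SELECT create_distributed_table('compressed_dist', 'data', colocate_with := 'none');
```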

View File

@ -113,7 +113,7 @@ ORDER BY 1;
function citus_shard_indexes_on_worker()
function citus_shard_sizes()
function citus_shards_on_worker()
function citus_split_shard_by_split_points(bigint,integer[],integer[])
function citus_split_shard_by_split_points(bigint,text[],integer[],citus.split_mode)
function citus_stat_activity()
function citus_stat_statements()
function citus_stat_statements_reset()
@ -243,6 +243,7 @@ ORDER BY 1;
function worker_record_sequence_dependency(regclass,regclass,name)
function worker_save_query_explain_analyze(text,jsonb)
function worker_split_shard_replication_setup(citus.split_shard_info[])
function worker_split_copy(bigint,citus.split_copy_info[])
schema citus
schema citus_internal
schema columnar
@ -272,6 +273,8 @@ ORDER BY 1;
type citus.distribution_type
type citus.shard_transfer_mode
type citus.split_shard_info
type citus.split_copy_info
type citus.split_mode
type citus_copy_format
type noderole
view citus_dist_stat_activity
@ -289,5 +292,5 @@ ORDER BY 1;
view columnar.stripe
view pg_dist_shard_placement
view time_partitions
(270 rows)
(274 rows)
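The object list pins down the new split API surface: `citus_split_shard_by_split_points` now takes text split points plus a `citus.split_mode`, and `worker_split_copy` arrives with its `citus.split_copy_info` row type. An illustrative call against the new signature (shard and node ids are placeholders lifted from the isolation specs below):

```sql
SELECT pg_catalog.citus_split_shard_by_split_points(
    1500002,              -- shard to split
    ARRAY['1073741824'],  -- split points, passed as text
    ARRAY[1, 2],          -- node ids that receive the resulting shards
    'blocking');          -- citus.split_mode
```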

View File

@ -0,0 +1,96 @@
CREATE SCHEMA worker_split_copy_test;
SET search_path TO worker_split_copy_test;
SET citus.shard_count TO 2;
SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 81070000;
-- BEGIN: Create distributed table and insert data.
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table"(id int primary key, value char);
SELECT create_distributed_table('"test !/ \n _""dist_123_table"', 'id');
create_distributed_table
---------------------------------------------------------------------
(1 row)
INSERT INTO "test !/ \n _""dist_123_table" (id, value) (SELECT g.id, 'N' FROM generate_series(1, 1000) AS g(id));
-- END: Create distributed table and insert data.
-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy.
\c - - - :worker_1_port
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070015"(id int primary key, value char);
CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070016"(id int primary key, value char);
-- END: Switch to Worker1, Create target shards in worker for local 2-way split copy.
-- BEGIN: List row count for source shard and target shards in Worker1.
\c - - - :worker_1_port
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070000";
count
---------------------------------------------------------------------
510
(1 row)
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
count
---------------------------------------------------------------------
0
(1 row)
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
count
---------------------------------------------------------------------
0
(1 row)
\c - - - :worker_2_port
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070001";
count
---------------------------------------------------------------------
490
(1 row)
-- END: List row count for source shard and target shards in Worker1.
-- BEGIN: Set worker_1_node and worker_2_node
\c - - - :worker_1_port
SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset
SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset
-- END: Set worker_1_node and worker_2_node
-- BEGIN: Trigger 2-way local shard split copy.
-- Ensure we will perform text copy.
SET citus.enable_binary_protocol = false;
SELECT * from worker_split_copy(
81070000, -- source shard id to copy
ARRAY[
-- split copy info for split child 1
ROW(81070015, -- destination shard id
-2147483648, -- split range begin
-1073741824, -- split range end
:worker_1_node)::citus.split_copy_info,
-- split copy info for split child 2
ROW(81070016, -- destination shard id
-1073741823, -- split range begin
-1, -- split range end
:worker_1_node)::citus.split_copy_info
]
);
worker_split_copy
---------------------------------------------------------------------
(1 row)
-- END: Trigger 2-way local shard split copy.
-- BEGIN: List updated row count for local target shards.
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015";
count
---------------------------------------------------------------------
247
(1 row)
SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016";
count
---------------------------------------------------------------------
263
(1 row)
-- END: List updated row count for local target shards.
-- BEGIN: CLEANUP.
\c - - - :master_port
SET client_min_messages TO WARNING;
DROP SCHEMA worker_split_copy_test CASCADE;
-- END: CLEANUP.
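The two ranges passed to `worker_split_copy` above cover the negative half of the int32 hash space, which is exactly what source shard 81070000 owns in a two-shard table. If in doubt, a shard's range can be read straight from the catalog; a small sketch:

```sql
-- inspect the hash range owned by the source shard
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE shardid = 81070000;
```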

View File

@ -1,3 +1,4 @@
test: isolation_setup
test: isolation_add_remove_node
test: isolation_update_node
test: isolation_update_node_lock_writes
@ -27,7 +28,7 @@ test: isolation_dml_vs_repair isolation_copy_placement_vs_copy_placement
test: isolation_concurrent_dml isolation_data_migration
test: isolation_drop_shards isolation_copy_placement_vs_modification
test: isolation_insert_vs_vacuum isolation_transaction_recovery
test: isolation_insert_vs_vacuum isolation_transaction_recovery isolation_vacuum_skip_locked
test: isolation_progress_monitoring
test: isolation_dump_local_wait_edges

View File

@ -17,9 +17,18 @@ Automated Failure Testing works by inserting a network proxy (mitmproxy) between
## Getting Started
First off, to use this you'll need mitmproxy, I recommend version `7.0.4`, and I also recommend running it with `python 3.9`. This script integrates pretty deeply with mitmproxy so other versions might fail to work.
First off, to use this you'll need mitmproxy.
Currently, we rely on a [fork](https://github.com/thanodnl/mitmproxy/tree/fix/tcp-flow-kill) to run the failure tests.
We recommend using pipenv to set up your failure testing environment, since it handles installing the fork
and other dependencies that may be updated or changed.
I highly recommend using pipenv to install mitmproxy. It lets you easily manage isolated environments (instead of installing python packages globally). If you've heard of virtualenv, pipenv is that but much easier to use.
Setting up pipenv is easy if you already have python and pip set up:
```bash
pip install pipenv
```
If the Pipfile requires a Python version you do not have, simply install that version and retry.
Pipenv should be able to find the newly installed Python and set up the environment.
Once you've installed it:
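A sketch of the typical pipenv workflow this leads into, assuming the Pipfile lives under `src/test/regress` (the README's exact commands may differ):

```bash
# hypothetical sketch; the actual steps in the README may differ
cd src/test/regress
pipenv install   # installs the mitmproxy fork and pinned dependencies from the Pipfile
pipenv shell     # enter the isolated environment before running the failure tests
```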

View File

@ -51,8 +51,6 @@ teardown
DROP TABLE partitioned_table;
SELECT citus_remove_node('localhost', 57636);
SELECT citus_internal.restore_isolation_tester_func();
}
// coordinator session

View File

@ -3,9 +3,6 @@
setup
{
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
@ -18,8 +15,6 @@ setup
teardown
{
SELECT citus_internal.restore_isolation_tester_func();
DROP TABLE selected_shard;
DROP TABLE logical_replicate_placement;
}

View File

@ -20,9 +20,6 @@ setup
AS 'citus', $$stop_session_level_connection_to_node$$;
RESET citus.enable_metadata_sync;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
@ -48,8 +45,6 @@ teardown
{
DROP TABLE selected_shard;
DROP TABLE logical_replicate_placement;
SELECT citus_internal.restore_isolation_tester_func();
}

View File

@ -2,9 +2,6 @@
// so setting the corresponding shard here is useful
setup
{
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
SET citus.shard_count TO 8;
SET citus.shard_replication_factor TO 1;
@ -16,8 +13,6 @@ setup
teardown
{
SELECT citus_internal.restore_isolation_tester_func();
DROP TABLE selected_shard;
DROP TABLE logical_replicate_placement;
}

View File

@ -19,9 +19,6 @@ setup
AS 'citus', $$stop_session_level_connection_to_node$$;
RESET citus.enable_ddl_propagation;
SELECT citus_internal.replace_isolation_tester_func();
SELECT citus_internal.refresh_isolation_tester_prepared_statement();
-- start_metadata_sync_to_node can not be run inside a transaction block
-- following is a workaround to overcome that
-- port numbers are hard coded at the moment
@ -46,8 +43,6 @@ teardown
{
DROP TABLE selected_shard;
DROP TABLE logical_replicate_placement;
SELECT citus_internal.restore_isolation_tester_func();
}

View File

@ -0,0 +1,146 @@
setup
{
SET citus.shard_count to 2;
SET citus.shard_replication_factor to 1;
SELECT setval('pg_dist_shardid_seq', 1500000);
CREATE TABLE to_split_table (id int, value int);
SELECT create_distributed_table('to_split_table', 'id');
}
teardown
{
DROP TABLE to_split_table;
}
session "s1"
step "s1-begin"
{
BEGIN;
-- the tests are written with the logic where single shard SELECTs
-- do not open transaction blocks
SET citus.select_opens_transaction_block TO false;
}
// cache all placements
step "s1-load-cache"
{
-- Indirect way to load cache.
TRUNCATE to_split_table;
}
step "s1-insert"
{
-- Id '123456789' maps to shard 1500002.
SELECT get_shard_id_for_distribution_column('to_split_table', 123456789);
INSERT INTO to_split_table VALUES (123456789, 1);
}
step "s1-update"
{
UPDATE to_split_table SET value = 111 WHERE id = 123456789;
}
step "s1-delete"
{
DELETE FROM to_split_table WHERE id = 123456789;
}
step "s1-select"
{
SELECT count(*) FROM to_split_table WHERE id = 123456789;
}
step "s1-ddl"
{
CREATE INDEX test_table_index ON to_split_table(id);
}
step "s1-copy"
{
COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}
step "s1-blocking-shard-split"
{
SELECT pg_catalog.citus_split_shard_by_split_points(
1500001,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
}
step "s1-commit"
{
COMMIT;
}
session "s2"
step "s2-begin"
{
BEGIN;
}
step "s2-blocking-shard-split"
{
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['1073741824'],
ARRAY[1, 2],
'blocking');
}
step "s2-commit"
{
COMMIT;
}
step "s2-print-cluster"
{
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM to_split_table ORDER BY id, value;
}
step "s2-print-index-count"
{
SELECT
nodeport, success, result
FROM
run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''')
ORDER BY
nodeport;
}
// Run shard split while concurrently performing DML and index creation
// We expect DML and COPY to fail because the shard they are waiting for is destroyed.
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"
// The same tests without loading the cache at first
permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"
// Concurrent shard split blocks on different shards of the same table (or any colocated table)
permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster"
// The same test above without loading the cache at first
permutation "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster"
// Concurrent DDL blocks on different shards of the same table (or any colocated table)
permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count"
// The same tests without loading the cache at first
permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count"

View File

@ -0,0 +1,104 @@
setup
{
SELECT setval('pg_dist_shardid_seq', 1500000);
SET citus.shard_count to 2;
SET citus.shard_replication_factor to 1;
CREATE TABLE reference_table (id int PRIMARY KEY, value int);
SELECT create_reference_table('reference_table');
CREATE TABLE table_to_split (id int, value int);
SELECT create_distributed_table('table_to_split', 'id');
}
teardown
{
DROP TABLE table_to_split CASCADE;
DROP TABLE reference_table CASCADE;
}
session "s1"
step "s1-begin"
{
BEGIN;
}
step "s1-insert"
{
INSERT INTO reference_table VALUES (5, 10);
}
step "s1-update"
{
UPDATE reference_table SET value = 5 WHERE id = 5;
}
step "s1-delete"
{
DELETE FROM reference_table WHERE id = 5;
}
step "s1-ddl"
{
CREATE INDEX reference_table_index ON reference_table(id);
}
step "s1-copy"
{
COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV;
}
step "s1-commit"
{
COMMIT;
}
session "s2"
step "s2-begin"
{
BEGIN;
}
step "s2-blocking-shard-split"
{
SELECT pg_catalog.citus_split_shard_by_split_points(
1500002,
ARRAY['-1073741824'],
ARRAY[1, 2],
'blocking');
}
step "s2-add-fkey"
{
ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id);
}
step "s2-commit"
{
COMMIT;
}
step "s2-print-cluster"
{
-- row count per shard
SELECT
nodeport, shardid, success, result
FROM
run_command_on_placements('table_to_split', 'select count(*) from %s')
ORDER BY
nodeport, shardid;
-- rows
SELECT id, value FROM table_to_split ORDER BY id, value;
}
// Run shard split while concurrently performing DML and index creation on the
// reference table which the distributed table has a foreign key to.
// All modifications should block on shard split.
permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster"
permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster"

Some files were not shown because too many files have changed in this diff.