diff --git a/CHANGELOG.md b/CHANGELOG.md index 35c5d6d38..e27f92824 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +### citus v11.0.3 (July 5, 2022) ### + +* Fixes a bug that prevents adding local tables with materialized views to + Citus metadata + +* Fixes a bug that prevents using `COMPRESSION` and `CONSTRAINT` on a column + +* Fixes upgrades to Citus 11 when there are no nodes in the metadata + ### citus v11.0.2 (June 15, 2022) ### * Drops support for PostgreSQL 12 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9ca4ef6c7..7253c92ed 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -35,6 +35,12 @@ why we ask this as well as instructions for how to proceed, see the # Optionally, you might instead want to use `make install-all` # since `multi_extension` regression test would fail due to missing downgrade scripts. cd src/test/regress + + pip install pipenv + pipenv --rm + pipenv install + pipenv shell + make check ``` @@ -53,7 +59,7 @@ why we ask this as well as instructions for how to proceed, see the autoconf flex git libcurl4-gnutls-dev libicu-dev \ libkrb5-dev liblz4-dev libpam0g-dev libreadline-dev \ libselinux1-dev libssl-dev libxslt1-dev libzstd-dev \ - make uuid-dev mitmproxy + make uuid-dev ``` 2. Get, build, and test the code @@ -67,6 +73,12 @@ why we ask this as well as instructions for how to proceed, see the # Optionally, you might instead want to use `sudo make install-all` # since `multi_extension` regression test would fail due to missing downgrade scripts. cd src/test/regress + + pip install pipenv + pipenv --rm + pipenv install + pipenv shell + make check ``` @@ -111,6 +123,12 @@ why we ask this as well as instructions for how to proceed, see the # Optionally, you might instead want to use `sudo make install-all` # since `multi_extension` regression test would fail due to missing downgrade scripts. cd src/test/regress + + pip install pipenv + pipenv --rm + pipenv install + pipenv shell + make check ``` diff --git a/src/backend/columnar/columnar_customscan.c b/src/backend/columnar/columnar_customscan.c index e2451f89d..ddb2104f8 100644 --- a/src/backend/columnar/columnar_customscan.c +++ b/src/backend/columnar/columnar_customscan.c @@ -277,6 +277,11 @@ ColumnarSetRelPathlistHook(PlannerInfo *root, RelOptInfo *rel, Index rti, * into the scan of the table to minimize the data read. 
*/ Relation relation = RelationIdGetRelation(rte->relid); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", rte->relid))); + } + if (relation->rd_tableam == GetColumnarTableAmRoutine()) { if (rte->tablesample != NULL) @@ -501,6 +506,11 @@ ColumnarIndexScanAdditionalCost(PlannerInfo *root, RelOptInfo *rel, &indexCorrelation, &fakeIndexPages); Relation relation = RelationIdGetRelation(relationId); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + uint64 rowCount = ColumnarTableRowCount(relation); RelationClose(relation); double estimatedRows = rowCount * indexSelectivity; @@ -596,6 +606,11 @@ static int RelationIdGetNumberOfAttributes(Oid relationId) { Relation relation = RelationIdGetRelation(relationId); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + int nattrs = relation->rd_att->natts; RelationClose(relation); return nattrs; @@ -1399,6 +1414,11 @@ static Cost ColumnarPerStripeScanCost(RelOptInfo *rel, Oid relationId, int numberOfColumnsRead) { Relation relation = RelationIdGetRelation(relationId); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + List *stripeList = StripesForRelfilenode(relation->rd_node); RelationClose(relation); @@ -1451,6 +1471,11 @@ static uint64 ColumnarTableStripeCount(Oid relationId) { Relation relation = RelationIdGetRelation(relationId); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + List *stripeList = StripesForRelfilenode(relation->rd_node); int stripeCount = list_length(stripeList); RelationClose(relation); diff --git a/src/backend/columnar/sql/columnar--11.0-2--11.0-3.sql b/src/backend/columnar/sql/columnar--11.0-2--11.0-3.sql new file mode 100644 index 000000000..b74717ebc --- /dev/null +++ b/src/backend/columnar/sql/columnar--11.0-2--11.0-3.sql @@ -0,0 +1 @@ +-- no changes needed diff --git a/src/backend/columnar/sql/columnar--11.0-2--11.1-1.sql b/src/backend/columnar/sql/columnar--11.0-3--11.1-1.sql similarity index 100% rename from src/backend/columnar/sql/columnar--11.0-2--11.1-1.sql rename to src/backend/columnar/sql/columnar--11.0-3--11.1-1.sql diff --git a/src/backend/columnar/sql/downgrades/citus--11.0-3--11.0-2.sql b/src/backend/columnar/sql/downgrades/citus--11.0-3--11.0-2.sql new file mode 100644 index 000000000..b74717ebc --- /dev/null +++ b/src/backend/columnar/sql/downgrades/citus--11.0-3--11.0-2.sql @@ -0,0 +1 @@ +-- no changes needed diff --git a/src/backend/columnar/sql/downgrades/columnar--11.1-1--11.0-2.sql b/src/backend/columnar/sql/downgrades/columnar--11.1-1--11.0-3.sql similarity index 100% rename from src/backend/columnar/sql/downgrades/columnar--11.1-1--11.0-2.sql rename to src/backend/columnar/sql/downgrades/columnar--11.1-1--11.0-3.sql diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index d6791c9ee..9c0f07be3 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -207,6 +207,7 @@ static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaNa char *sourceName, char *targetSchemaName, char *targetName); +static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid); static char * CreateMaterializedViewDDLCommand(Oid matViewOid); static char * 
GetAccessMethodForMatViewIfExists(Oid viewOid); static bool WillRecreateForeignKeyToReferenceTable(Oid relationId, @@ -223,6 +224,8 @@ PG_FUNCTION_INFO_V1(worker_change_sequence_dependency); /* global variable keeping track of whether we are in a table type conversion function */ bool InTableTypeConversionFunctionCall = false; +/* controlled by GUC, in MB */ +int MaxMatViewSizeToAutoRecreate = 1024; /* * undistribute_table gets a distributed table name and @@ -854,6 +857,11 @@ static void DropIndexesNotSupportedByColumnar(Oid relationId, bool suppressNoticeMessages) { Relation columnarRelation = RelationIdGetRelation(relationId); + if (!RelationIsValid(columnarRelation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + List *indexIdList = RelationGetIndexList(columnarRelation); /* @@ -1357,6 +1365,7 @@ List * GetViewCreationCommandsOfTable(Oid relationId) { List *views = GetDependingViews(relationId); + List *commands = NIL; Oid viewOid = InvalidOid; @@ -1367,6 +1376,8 @@ GetViewCreationCommandsOfTable(Oid relationId) /* See comments on CreateMaterializedViewDDLCommand for its limitations */ if (get_rel_relkind(viewOid) == RELKIND_MATVIEW) { + ErrorIfMatViewSizeExceedsTheLimit(viewOid); + char *matViewCreateCommands = CreateMaterializedViewDDLCommand(viewOid); appendStringInfoString(query, matViewCreateCommands); } @@ -1406,6 +1417,42 @@ GetViewCreationTableDDLCommandsOfTable(Oid relationId) } +/* + * ErrorIfMatViewSizeExceedsTheLimit takes the oid of a materialized view and errors + * out if the size of the matview exceeds the limit set by the GUC + * citus.max_matview_size_to_auto_recreate. + */ +static void +ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid) +{ + if (MaxMatViewSizeToAutoRecreate >= 0) + { + /* if it's below 0, it means the user has removed the limit */ + Datum relSizeDatum = DirectFunctionCall1(pg_total_relation_size, + ObjectIdGetDatum(matViewOid)); + uint64 matViewSize = DatumGetInt64(relSizeDatum); + + /* convert from MB to bytes */ + uint64 limitSizeInBytes = MaxMatViewSizeToAutoRecreate * 1024L * 1024L; + + if (matViewSize > limitSizeInBytes) + { + ereport(ERROR, (errmsg("size of the materialized view %s exceeds " + "citus.max_matview_size_to_auto_recreate " + "(currently %d MB)", get_rel_name(matViewOid), + MaxMatViewSizeToAutoRecreate), + errdetail("Citus restricts automatically recreating " + "materialized views that are larger than the " + "limit, because it could take too long."), + errhint( + "Consider increasing the size limit by setting " + "citus.max_matview_size_to_auto_recreate; " + "or you can remove the limit by setting it to -1"))); + } + } +} + + /* * CreateMaterializedViewDDLCommand creates the command to create materialized view. * Note that this function doesn't support @@ -1967,6 +2014,19 @@ ExecuteQueryViaSPI(char *query, int SPIOK) } +/* + * ExecuteAndLogQueryViaSPI is a wrapper around ExecuteQueryViaSPI, that logs + * the query to be executed, with the given log level. 
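The size guard above is driven by a new GUC. As a hedged usage sketch — the table name `events` and the target size are invented; the GUC name, its MB unit, the default of 1024, and the `-1` "no limit" value come from the change itself — a conversion such as `alter_distributed_table()` or `undistribute_table()`, which recreates depending views, could be preceded by:

```sql
-- Sketch only: 'events' is a hypothetical table with a large depending matview.
SET citus.max_matview_size_to_auto_recreate TO 4096;  -- raise the limit to 4 GB
-- or remove the limit entirely, as the error hint above suggests:
SET citus.max_matview_size_to_auto_recreate TO -1;
SELECT alter_distributed_table('events', shard_count := 32);
```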
+ */ +void +ExecuteAndLogQueryViaSPI(char *query, int SPIOK, int logLevel) +{ + ereport(logLevel, (errmsg("executing \"%s\"", query))); + + ExecuteQueryViaSPI(query, SPIOK); +} + + /* * SwitchToSequentialAndLocalExecutionIfRelationNameTooLong generates the longest shard name * on the shards of a distributed table, and if exceeds the limit switches to sequential and diff --git a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c index a31fd3cc1..5b22ecbab 100644 --- a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c +++ b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c @@ -26,6 +26,7 @@ #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" #include "distributed/worker_protocol.h" +#include "executor/spi.h" #include "miscadmin.h" #include "utils/builtins.h" #include "utils/lsyscache.h" @@ -513,12 +514,12 @@ ExecuteCascadeOperationForRelationIdList(List *relationIdList, /* - * ExecuteAndLogUtilityCommandListInTableTypeConversion is a wrapper function - * around ExecuteAndLogUtilityCommandList, that makes it execute with the flag - * InTableTypeConversionFunctionCall is set to true. + * ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI is a wrapper function + * around ExecuteAndLogQueryViaSPI, that executes view creation commands + * with the flag InTableTypeConversionFunctionCall set to true. */ void -ExecuteAndLogUtilityCommandListInTableTypeConversion(List *utilityCommandList) +ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(List *utilityCommandList) { bool oldValue = InTableTypeConversionFunctionCall; InTableTypeConversionFunctionCall = true; @@ -526,7 +527,15 @@ ExecuteAndLogUtilityCommandListInTableTypeConversion(List *utilityCommandList) MemoryContext savedMemoryContext = CurrentMemoryContext; PG_TRY(); { - ExecuteAndLogUtilityCommandList(utilityCommandList); + char *utilityCommand = NULL; + foreach_ptr(utilityCommand, utilityCommandList) + { + /* + * CREATE MATERIALIZED VIEW commands need to be parsed/transformed, + * which SPI does for us. + */ + ExecuteAndLogQueryViaSPI(utilityCommand, SPI_OK_UTILITY, DEBUG1); + } } PG_CATCH(); { diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index d9335fcbc..9d58cc716 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -349,7 +349,7 @@ CreateCitusLocalTable(Oid relationId, bool cascadeViaForeignKeys, bool autoConve * Execute the view creation commands with the shell table. * Views will be distributed via FinalizeCitusLocalTableCreation below. 
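The SPI-based execution above is what allows `CREATE MATERIALIZED VIEW` commands to be replayed when a table with depending materialized views is added to Citus metadata (the CHANGELOG entry at the top of this diff). A minimal, hypothetical scenario this change is meant to cover — object names are invented:

```sql
-- Hypothetical repro sketch; 'items' and 'items_mv' are invented names.
CREATE TABLE items (key int PRIMARY KEY, value text);
CREATE MATERIALIZED VIEW items_mv AS SELECT key, count(*) AS cnt FROM items GROUP BY key;
-- Previously this could fail while recreating the depending matview; with the SPI-based
-- execution (and the MATERIALIZED-aware DROP further below) it is expected to succeed.
SELECT citus_add_local_table_to_metadata('items');
```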
*/ - ExecuteAndLogUtilityCommandListInTableTypeConversion(tableViewCreationCommands); + ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI(tableViewCreationCommands); /* * Set shellRelationId as the relation with relationId now points @@ -891,6 +891,11 @@ static void RenameShardRelationStatistics(Oid shardRelationId, uint64 shardId) { Relation shardRelation = RelationIdGetRelation(shardRelationId); + if (!RelationIsValid(shardRelation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", shardRelationId))); + } + List *statsOidList = RelationGetStatExtList(shardRelation); RelationClose(shardRelation); @@ -1053,7 +1058,9 @@ DropViewsOnTable(Oid relationId) char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); StringInfo dropCommand = makeStringInfo(); - appendStringInfo(dropCommand, "DROP VIEW IF EXISTS %s", + appendStringInfo(dropCommand, "DROP %sVIEW IF EXISTS %s", + get_rel_relkind(viewId) == RELKIND_MATVIEW ? "MATERIALIZED " : + "", qualifiedViewName); ExecuteAndLogUtilityCommand(dropCommand->data); diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index d516ad777..24afc0a0a 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -1024,6 +1024,14 @@ static DistributeObjectOps Type_Rename = { .address = RenameTypeStmtObjectAddress, .markDistributed = false, }; +static DistributeObjectOps Vacuum_Analyze = { + .deparse = NULL, + .qualify = NULL, + .preprocess = NULL, + .postprocess = PostprocessVacuumStmt, + .address = NULL, + .markDistributed = false, +}; /* * PreprocessRenameViewStmt function can be called internally by ALTER TABLE view_name @@ -1653,6 +1661,11 @@ GetDistributeObjectOps(Node *node) return &Any_Reindex; } + case T_VacuumStmt: + { + return &Vacuum_Analyze; + } + case T_RenameStmt: { RenameStmt *stmt = castNode(RenameStmt, node); diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index ac04d9701..2e9d2d58d 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -321,6 +321,11 @@ ExecuteFunctionOnEachTableIndex(Oid relationId, PGIndexProcessor pgIndexProcesso List *result = NIL; Relation relation = RelationIdGetRelation(relationId); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + List *indexIdList = RelationGetIndexList(relation); Oid indexId = InvalidOid; foreach_oid(indexId, indexIdList) diff --git a/src/backend/distributed/commands/rename.c b/src/backend/distributed/commands/rename.c index 6511aed81..5e313d68c 100644 --- a/src/backend/distributed/commands/rename.c +++ b/src/backend/distributed/commands/rename.c @@ -48,11 +48,12 @@ PreprocessRenameStmt(Node *node, const char *renameCommand, /* * The lock levels here should be same as the ones taken in - * RenameRelation(), renameatt() and RenameConstraint(). However, since all - * four statements have identical lock levels, we just use a single statement. + * RenameRelation(), renameatt() and RenameConstraint(). All statements + * have identical lock levels except alter index rename. */ - objectRelationId = RangeVarGetRelid(renameStmt->relation, - AccessExclusiveLock, + LOCKMODE lockmode = (IsIndexRenameStmt(renameStmt)) ? 
+ ShareUpdateExclusiveLock : AccessExclusiveLock; + objectRelationId = RangeVarGetRelid(renameStmt->relation, lockmode, renameStmt->missing_ok); /* @@ -99,6 +100,18 @@ PreprocessRenameStmt(Node *node, const char *renameCommand, case OBJECT_TABCONSTRAINT: case OBJECT_POLICY: { + if (relKind == RELKIND_INDEX || + relKind == RELKIND_PARTITIONED_INDEX) + { + /* + * Although weird, postgres allows ALTER TABLE .. RENAME command + * on indexes. We don't want to break non-distributed tables, + * so allow. + */ + tableRelationId = IndexGetRelation(objectRelationId, false); + break; + } + /* the target object is our tableRelationId. */ tableRelationId = objectRelationId; break; @@ -106,6 +119,25 @@ PreprocessRenameStmt(Node *node, const char *renameCommand, case OBJECT_INDEX: { + if (relKind == RELKIND_RELATION || + relKind == RELKIND_PARTITIONED_TABLE) + { + /* + * Although weird, postgres allows ALTER INDEX .. RENAME command + * on tables. We don't want to break non-distributed tables, + * so allow. + * Because of the weird syntax, we locked with wrong level, so relock + * the relation to acquire true level of lock. Same logic + * can be found in the function RenameRelation(RenameStmt) at tablecmds.c + */ + UnlockRelationOid(objectRelationId, lockmode); + objectRelationId = RangeVarGetRelid(renameStmt->relation, + AccessExclusiveLock, + renameStmt->missing_ok); + tableRelationId = objectRelationId; + break; + } + /* * here, objRelationId points to the index relation entry, and we * are interested into the entry of the table on which the index is diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index 6f8e6df54..bf48505a5 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -469,6 +469,11 @@ GetExplicitStatisticsCommandList(Oid relationId) List *explicitStatisticsCommandList = NIL; Relation relation = RelationIdGetRelation(relationId); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + List *statisticsIdList = RelationGetStatExtList(relation); RelationClose(relation); @@ -540,6 +545,11 @@ GetExplicitStatisticsSchemaIdList(Oid relationId) List *schemaIdList = NIL; Relation relation = RelationIdGetRelation(relationId); + if (!RelationIsValid(relation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", relationId))); + } + List *statsIdList = RelationGetStatExtList(relation); RelationClose(relation); diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index fa78e70aa..cb6ab86a3 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -830,14 +830,6 @@ ProcessUtilityInternal(PlannedStmt *pstmt, } } - /* TODO: fold VACUUM's processing into the above block */ - if (IsA(parsetree, VacuumStmt)) - { - VacuumStmt *vacuumStmt = (VacuumStmt *) parsetree; - - PostprocessVacuumStmt(vacuumStmt, queryString); - } - if (!IsDropCitusExtensionStmt(parsetree) && !IsA(parsetree, DropdbStmt)) { /* diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index 9b1e0bfb3..374e52bda 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -20,7 +20,7 @@ #include "distributed/deparse_shard_query.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" -#include "distributed/multi_executor.h" 
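Looking back at the `PreprocessRenameStmt` changes earlier in this diff: per the comments there, PostgreSQL tolerates the "swapped" spellings below, so the hook first locks with the level implied by the statement form and re-locks with `AccessExclusiveLock` when `ALTER INDEX` turns out to target a table. A hedged SQL sketch with invented relation names:

```sql
-- Sketch of the statement forms the new branches distinguish; names are invented.
ALTER INDEX my_table_pkey RENAME TO my_table_pkey_old;    -- index rename: ShareUpdateExclusiveLock
ALTER TABLE my_table RENAME TO my_table_v2;               -- table rename: AccessExclusiveLock
ALTER TABLE my_table_pkey_old RENAME TO my_table_pkey_v2; -- ALTER TABLE on an index (allowed)
ALTER INDEX my_table_v2 RENAME TO my_table_v3;            -- ALTER INDEX on a table (re-locked)
```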
+#include "distributed/metadata_sync.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/version_compat.h" @@ -48,7 +48,7 @@ typedef struct CitusVacuumParams } CitusVacuumParams; /* Local functions forward declarations for processing distributed table commands */ -static bool IsDistributedVacuumStmt(int vacuumOptions, List *VacuumCitusRelationIdList); +static bool IsDistributedVacuumStmt(List *vacuumRelationIdList); static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColumnList); static char * DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams); @@ -57,44 +57,28 @@ static List * VacuumColumnList(VacuumStmt *vacuumStmt, int relationIndex); static List * ExtractVacuumTargetRels(VacuumStmt *vacuumStmt); static void ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, CitusVacuumParams vacuumParams); +static void ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, + CitusVacuumParams vacuumParams); static CitusVacuumParams VacuumStmtParams(VacuumStmt *vacstmt); -static List * VacuumCitusRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams - vacuumParams); +static List * VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams + vacuumParams); /* * PostprocessVacuumStmt processes vacuum statements that may need propagation to - * distributed tables. If a VACUUM or ANALYZE command references a distributed - * table, it is propagated to all involved nodes; otherwise, this function will - * immediately exit after some error checking. + * citus tables only if ddl propagation is enabled. If a VACUUM or ANALYZE command + * references a citus table or no table, it is propagated to all involved nodes; otherwise, + * the statements will not be propagated. * * Unlike most other Process functions within this file, this function does not * return a modified parse node, as it is expected that the local VACUUM or * ANALYZE has already been processed. */ -void -PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand) +List * +PostprocessVacuumStmt(Node *node, const char *vacuumCommand) { + VacuumStmt *vacuumStmt = castNode(VacuumStmt, node); + CitusVacuumParams vacuumParams = VacuumStmtParams(vacuumStmt); - const char *stmtName = (vacuumParams.options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE"; - - /* - * No table in the vacuum statement means vacuuming all relations - * which is not supported by citus. - */ - if (list_length(vacuumStmt->rels) == 0) - { - /* WARN for unqualified VACUUM commands */ - ereport(WARNING, (errmsg("not propagating %s command to worker nodes", stmtName), - errhint("Provide a specific table in order to %s " - "distributed tables.", stmtName))); - } - - - List *citusRelationIdList = VacuumCitusRelationIdList(vacuumStmt, vacuumParams); - if (list_length(citusRelationIdList) == 0) - { - return; - } if (vacuumParams.options & VACOPT_VACUUM) { @@ -109,32 +93,42 @@ PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand) } /* - * Here we get the relation list again because we might have - * closed the current transaction and the memory context got reset. - * Vacuum's context is PortalContext, which lasts for the whole session - * so committing/starting a new transaction doesn't affect it. 
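To make the new propagation rules in the `PostprocessVacuumStmt` comment above concrete, here is a hedged sketch of what is and is not forwarded when run on the coordinator; table names are invented, and the `SKIP_LOCKED` handling is detailed a bit further below:

```sql
-- Expected behavior per the comments in this change; names are invented.
VACUUM;                            -- unqualified: forwarded as-is to all worker nodes
ANALYZE dist_table;                -- references a Citus table: propagated to its shard placements
VACUUM (SKIP_LOCKED) dist_table;   -- locks taken conditionally; locked relations are skipped
VACUUM plain_local_table;          -- only local tables referenced: not propagated
```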
+ * when no table is specified propagate the command as it is; + * otherwise, only propagate when there is at least 1 citus table */ - citusRelationIdList = VacuumCitusRelationIdList(vacuumStmt, vacuumParams); - bool distributedVacuumStmt = IsDistributedVacuumStmt(vacuumParams.options, - citusRelationIdList); - if (!distributedVacuumStmt) + List *relationIdList = VacuumRelationIdList(vacuumStmt, vacuumParams); + + if (list_length(vacuumStmt->rels) == 0) { - return; + /* no table is specified (unqualified vacuum) */ + + ExecuteUnqualifiedVacuumTasks(vacuumStmt, vacuumParams); + } + else if (IsDistributedVacuumStmt(relationIdList)) + { + /* there is at least 1 citus table specified */ + + ExecuteVacuumOnDistributedTables(vacuumStmt, relationIdList, + vacuumParams); } - ExecuteVacuumOnDistributedTables(vacuumStmt, citusRelationIdList, vacuumParams); + /* else only local tables are specified */ + + return NIL; } /* - * VacuumCitusRelationIdList returns the oid of the relations in the given vacuum statement. + * VacuumRelationIdList returns the oid of the relations in the given vacuum statement. */ static List * -VacuumCitusRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams) +VacuumRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams) { LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock : ShareUpdateExclusiveLock; + bool skipLocked = (vacuumParams.options & VACOPT_SKIP_LOCKED); + List *vacuumRelationList = ExtractVacuumTargetRels(vacuumStmt); List *relationIdList = NIL; @@ -142,18 +136,45 @@ VacuumCitusRelationIdList(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams RangeVar *vacuumRelation = NULL; foreach_ptr(vacuumRelation, vacuumRelationList) { - Oid relationId = RangeVarGetRelid(vacuumRelation, lockMode, false); - if (!IsCitusTable(relationId)) + /* + * If skip_locked option is enabled, we are skipping that relation + * if the lock for it is currently not available; else, we get the lock. + */ + Oid relationId = RangeVarGetRelidExtended(vacuumRelation, + lockMode, + skipLocked ? RVR_SKIP_LOCKED : 0, NULL, + NULL); + + if (OidIsValid(relationId)) { - continue; + relationIdList = lappend_oid(relationIdList, relationId); } - relationIdList = lappend_oid(relationIdList, relationId); } return relationIdList; } +/* + * IsDistributedVacuumStmt returns true if there is any citus table in the relation id list; + * otherwise, it returns false. + */ +static bool +IsDistributedVacuumStmt(List *vacuumRelationIdList) +{ + Oid relationId = InvalidOid; + foreach_oid(relationId, vacuumRelationIdList) + { + if (OidIsValid(relationId) && IsCitusTable(relationId)) + { + return true; + } + } + + return false; +} + + /* * ExecuteVacuumOnDistributedTables executes the vacuum for the shard placements of given tables * if they are citus tables. @@ -183,53 +204,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, } -/* - * IsDistributedVacuumStmt returns whether distributed execution of a - * given VacuumStmt is supported. The provided relationId list represents - * the list of tables targeted by the provided statement. - * - * Returns true if the statement requires distributed execution and returns - * false otherwise. - */ -static bool -IsDistributedVacuumStmt(int vacuumOptions, List *VacuumCitusRelationIdList) -{ - bool distributeStmt = false; - int distributedRelationCount = 0; - - const char *stmtName = (vacuumOptions & VACOPT_VACUUM) ? 
"VACUUM" : "ANALYZE"; - - - Oid relationId = InvalidOid; - foreach_oid(relationId, VacuumCitusRelationIdList) - { - if (OidIsValid(relationId) && IsCitusTable(relationId)) - { - distributedRelationCount++; - } - } - - if (distributedRelationCount == 0) - { - /* nothing to do here */ - } - else if (!EnableDDLPropagation) - { - /* WARN if DDL propagation is not enabled */ - ereport(WARNING, (errmsg("not propagating %s command to worker nodes", stmtName), - errhint("Set citus.enable_ddl_propagation to true in order to " - "send targeted %s commands to worker nodes.", - stmtName))); - } - else - { - distributeStmt = true; - } - - return distributeStmt; -} - - /* * VacuumTaskList returns a list of tasks to be executed as part of processing * a VacuumStmt which targets a distributed relation. @@ -237,6 +211,9 @@ IsDistributedVacuumStmt(int vacuumOptions, List *VacuumCitusRelationIdList) static List * VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColumnList) { + LOCKMODE lockMode = (vacuumParams.options & VACOPT_FULL) ? AccessExclusiveLock : + ShareUpdateExclusiveLock; + /* resulting task list */ List *taskList = NIL; @@ -255,8 +232,20 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum * RowExclusiveLock. However if VACUUM FULL is used, we already obtain * AccessExclusiveLock before reaching to that point and INSERT's will be * blocked anyway. This is inline with PostgreSQL's own behaviour. + * Also note that if skip locked option is enabled, we try to acquire the lock + * in nonblocking way. If lock is not available, vacuum just skip that relation. */ - LockRelationOid(relationId, ShareUpdateExclusiveLock); + if (!(vacuumParams.options & VACOPT_SKIP_LOCKED)) + { + LockRelationOid(relationId, lockMode); + } + else + { + if (!ConditionalLockRelationOid(relationId, lockMode)) + { + return NIL; + } + } List *shardIntervalList = LoadShardIntervalList(relationId); @@ -391,10 +380,33 @@ DeparseVacuumStmtPrefix(CitusVacuumParams vacuumParams) if (vacuumParams.index_cleanup != VACOPTVALUE_UNSPECIFIED) { - appendStringInfoString(vacuumPrefix, - vacuumParams.index_cleanup == VACOPTVALUE_ENABLED ? - "INDEX_CLEANUP," : "INDEX_CLEANUP false," - ); + switch (vacuumParams.index_cleanup) + { + case VACOPTVALUE_ENABLED: + { + appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP true,"); + break; + } + + case VACOPTVALUE_DISABLED: + { + appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP false,"); + break; + } + + #if PG_VERSION_NUM >= PG_VERSION_14 + case VACOPTVALUE_AUTO: + { + appendStringInfoString(vacuumPrefix, "INDEX_CLEANUP auto,"); + break; + } + #endif + + default: + { + break; + } + } } #if PG_VERSION_NUM >= PG_VERSION_13 @@ -552,8 +564,32 @@ VacuumStmtParams(VacuumStmt *vacstmt) #endif else if (strcmp(opt->defname, "index_cleanup") == 0) { + #if PG_VERSION_NUM >= PG_VERSION_14 + + /* Interpret no string as the default, which is 'auto' */ + if (!opt->arg) + { + params.index_cleanup = VACOPTVALUE_AUTO; + } + else + { + char *sval = defGetString(opt); + + /* Try matching on 'auto' string, or fall back on boolean */ + if (pg_strcasecmp(sval, "auto") == 0) + { + params.index_cleanup = VACOPTVALUE_AUTO; + } + else + { + params.index_cleanup = defGetBoolean(opt) ? VACOPTVALUE_ENABLED : + VACOPTVALUE_DISABLED; + } + } + #else params.index_cleanup = defGetBoolean(opt) ? 
VACOPTVALUE_ENABLED : VACOPTVALUE_DISABLED; + #endif } else if (strcmp(opt->defname, "truncate") == 0) { @@ -606,3 +642,62 @@ VacuumStmtParams(VacuumStmt *vacstmt) (disable_page_skipping ? VACOPT_DISABLE_PAGE_SKIPPING : 0); return params; } + + +/* + * ExecuteUnqualifiedVacuumTasks executes tasks for unqualified vacuum commands + */ +static void +ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumParams) +{ + /* don't allow concurrent node list changes that require an exclusive lock */ + List *workerNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock); + + if (list_length(workerNodes) == 0) + { + return; + } + + const char *vacuumStringPrefix = DeparseVacuumStmtPrefix(vacuumParams); + + StringInfo vacuumCommand = makeStringInfo(); + appendStringInfoString(vacuumCommand, vacuumStringPrefix); + + List *unqualifiedVacuumCommands = list_make3(DISABLE_DDL_PROPAGATION, + vacuumCommand->data, + ENABLE_DDL_PROPAGATION); + + Task *task = CitusMakeNode(Task); + task->jobId = INVALID_JOB_ID; + task->taskType = VACUUM_ANALYZE_TASK; + SetTaskQueryStringList(task, unqualifiedVacuumCommands); + task->dependentTaskList = NULL; + task->replicationModel = REPLICATION_MODEL_INVALID; + task->cannotBeExecutedInTransction = ((vacuumParams.options) & VACOPT_VACUUM); + + + bool hasPeerWorker = false; + int32 localNodeGroupId = GetLocalGroupId(); + + WorkerNode *workerNode = NULL; + foreach_ptr(workerNode, workerNodes) + { + if (workerNode->groupId != localNodeGroupId) + { + ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement); + targetPlacement->nodeName = workerNode->workerName; + targetPlacement->nodePort = workerNode->workerPort; + targetPlacement->groupId = workerNode->groupId; + + task->taskPlacementList = lappend(task->taskPlacementList, + targetPlacement); + hasPeerWorker = true; + } + } + + if (hasPeerWorker) + { + bool localExecution = false; + ExecuteUtilityTaskList(list_make1(task), localExecution); + } +} diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index d3434d775..0cc3c1e0e 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -377,6 +377,14 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults atttypmod); appendStringInfoString(&buffer, attributeTypeName); +#if PG_VERSION_NUM >= PG_VERSION_14 + if (CompressionMethodIsValid(attributeForm->attcompression)) + { + appendStringInfo(&buffer, " COMPRESSION %s", + GetCompressionMethodName(attributeForm->attcompression)); + } +#endif + /* if this column has a default value, append the default value */ if (attributeForm->atthasdef) { @@ -448,14 +456,6 @@ pg_get_tableschemadef_string(Oid tableRelationId, IncludeSequenceDefaults appendStringInfoString(&buffer, " NOT NULL"); } -#if PG_VERSION_NUM >= PG_VERSION_14 - if (CompressionMethodIsValid(attributeForm->attcompression)) - { - appendStringInfo(&buffer, " COMPRESSION %s", - GetCompressionMethodName(attributeForm->attcompression)); - } -#endif - if (attributeForm->attcollation != InvalidOid && attributeForm->attcollation != DEFAULT_COLLATION_OID) { diff --git a/src/backend/distributed/operations/citus_split_shard_by_split_points.c b/src/backend/distributed/operations/citus_split_shard_by_split_points.c index 7674f7a2b..ca1ee5cca 100644 --- a/src/backend/distributed/operations/citus_split_shard_by_split_points.c +++ b/src/backend/distributed/operations/citus_split_shard_by_split_points.c @@ -77,6 
+77,7 @@ LookupSplitMode(Oid shardSplitModeOid) Datum enumLabelDatum = DirectFunctionCall1(enum_out, shardSplitModeOid); char *enumLabel = DatumGetCString(enumLabelDatum); + /* Extend with other modes as we support them */ if (strncmp(enumLabel, "blocking", NAMEDATALEN) == 0) { shardSplitMode = BLOCKING_SPLIT; @@ -88,7 +89,8 @@ LookupSplitMode(Oid shardSplitModeOid) /* Extend with other modes as we support them */ else { - ereport(ERROR, (errmsg("Invalid label for enum: %s", enumLabel))); + ereport(ERROR, (errmsg("Invalid split mode: %s. Expected split mode is blocking.", + enumLabel))); } return shardSplitMode; diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index d8a27e1b4..e886566e5 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -40,9 +40,11 @@ static void ErrorIfCannotSplitShardExtended(SplitOperation splitOperation, ShardInterval *shardIntervalToSplit, List *shardSplitPointsList, List *nodeIdsForPlacementList); -static void CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode, - List *sourceColocatedShardIntervalList, - List *shardGroupSplitIntervalListList, +static void CreateAndCopySplitShardsForShardGroup(WorkerNode *sourceShardNode, + List *sourceColocatedShardIntervalList, + List *shardGroupSplitIntervalListList, + List *workersForPlacementList); +static void CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList, List *workersForPlacementList); static void CreateSplitShardsForShardGroupTwo(WorkerNode *sourceShardNode, List *sourceColocatedShardIntervalList, @@ -57,6 +59,8 @@ static void SplitShardReplicationSetup(List *sourceColocatedShardIntervalList, WorkerNode *sourceWorkerNode, List *workersForPlacementList); static HTAB * CreateWorkerForPlacementSet(List *workersForPlacementList); +static void CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList, + List *workersForPlacementList); static void CreateObjectOnPlacement(List *objectCreationCommandList, WorkerNode *workerNode); static List * CreateSplitIntervalsForShardGroup(List *sourceColocatedShardList, @@ -273,10 +277,13 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation, { int32 shardSplitPointValue = DatumGetInt32(shardSplitPoint); - /* All Split points should lie within the shard interval range. */ - int splitPointShardIndex = FindShardIntervalIndex(shardSplitPoint, - cachedTableEntry); - if (shardIntervalToSplit->shardIndex != splitPointShardIndex) + /* + * 1) All Split points should lie within the shard interval range. + * 2) Given our split points inclusive, you cannot specify the max value in a range as a split point. + * Example: Shard 81060002 range is from (0,1073741823). '1073741823' as split point is invalid. + * '1073741822' is correct and will split shard to: (0, 1073741822) and (1073741823, 1073741823). + */ + if (shardSplitPointValue < minValue || shardSplitPointValue > maxValue) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), @@ -287,6 +294,16 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation, DatumGetInt32(shardIntervalToSplit->maxValue), shardIntervalToSplit->shardId))); } + else if (maxValue == shardSplitPointValue) + { + int32 validSplitPoint = shardIntervalToSplit->maxValue - 1; + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg( + "Invalid split point %d, as split points should be inclusive. 
Please use %d instead.", + maxValue, + validSplitPoint))); + } /* Split points should be in strictly increasing order */ int32 lastShardSplitPointValue = DatumGetInt32(lastShardSplitPoint.value); @@ -302,22 +319,6 @@ ErrorIfCannotSplitShardExtended(SplitOperation splitOperation, shardSplitPointValue))); } - /* - * Given our split points inclusive, you cannot specify the max value in a range as a split point. - * Example: Shard 81060002 range is from (0,1073741823). '1073741823' as split point is invalid. - * '1073741822' is correct and will split shard to: (0, 1073741822) and (1073741823, 1073741823). - */ - if (maxValue == shardSplitPointValue) - { - int32 validSplitPoint = shardIntervalToSplit->maxValue - 1; - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg( - "Invalid split point %d, as split points should be inclusive. Please use %d instead.", - maxValue, - validSplitPoint))); - } - lastShardSplitPoint = (NullableDatum) { shardSplitPoint, false }; @@ -446,11 +447,11 @@ BlockingShardSplit(SplitOperation splitOperation, PG_TRY(); { /* - * Physically create split children, perform split copy and create auxillary structures. + * Physically create split children, perform split copy and create auxiliary structures. * This includes: indexes, replicaIdentity. triggers and statistics. * Foreign key constraints are created after Metadata changes (see CreateForeignKeyConstraints). */ - CreateSplitShardsForShardGroup( + CreateAndCopySplitShardsForShardGroup( sourceShardToCopyNode, sourceColocatedShardIntervalList, shardGroupSplitIntervalListList, @@ -491,9 +492,7 @@ BlockingShardSplit(SplitOperation splitOperation, /* Create ShardGroup split children on a list of corresponding workers. */ static void -CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode, - List *sourceColocatedShardIntervalList, - List *shardGroupSplitIntervalListList, +CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList, List *workersForPlacementList) { /* Iterate on shard interval list for shard group */ @@ -519,14 +518,20 @@ CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode, CreateObjectOnPlacement(splitShardCreationCommandList, workerPlacementNode); } } +} - /* Perform Split Copy */ - DoSplitCopy(sourceShardNode, sourceColocatedShardIntervalList, - shardGroupSplitIntervalListList, workersForPlacementList); +/* Create ShardGroup auxiliary structures (indexes, stats, replicaindentities, triggers) + * on a list of corresponding workers. + */ +static void +CreateAuxiliaryStructuresForShardGroup(List *shardGroupSplitIntervalListList, + List *workersForPlacementList) +{ /* - * Create auxillary structures post copy. + * Create auxiliary structures post copy. */ + List *shardIntervalList = NULL; foreach_ptr(shardIntervalList, shardGroupSplitIntervalListList) { /* Iterate on split shard interval list and corresponding placement worker */ @@ -549,6 +554,28 @@ CreateSplitShardsForShardGroup(WorkerNode *sourceShardNode, } +/* + * Create ShardGroup split children, perform copy and create auxiliary structures + * on a list of corresponding workers. 
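For orientation, a hedged invocation of the split UDF these helpers back. Only parts of its signature are visible in this diff (`shard_id bigint`, `split_points text[]`, a node-id array for the new placements, and a `citus.split_mode` such as `'blocking'`), so treat the exact call shape as an assumption; the shard id and ranges reuse the worked example from the comment above.

```sql
-- Assumed call shape; shard 81060002 covering (0, 1073741823) is the example used above.
SELECT pg_catalog.citus_split_shard_by_split_points(
        81060002,
        ARRAY['1073741822'],   -- split points are inclusive, so the range max itself is rejected
        ARRAY[2, 3],           -- node ids that receive the two child placements
        'blocking');
-- Expected children per the comment above: (0, 1073741822) and (1073741823, 1073741823).
```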
+ */ +static void +CreateAndCopySplitShardsForShardGroup(WorkerNode *sourceShardNode, + List *sourceColocatedShardIntervalList, + List *shardGroupSplitIntervalListList, + List *workersForPlacementList) +{ + CreateSplitShardsForShardGroup(shardGroupSplitIntervalListList, + workersForPlacementList); + + DoSplitCopy(sourceShardNode, sourceColocatedShardIntervalList, + shardGroupSplitIntervalListList, workersForPlacementList); + + /* Create auxiliary structures (indexes, stats, replicaindentities, triggers) */ + CreateAuxiliaryStructuresForShardGroup(shardGroupSplitIntervalListList, + workersForPlacementList); +} + + /* * Perform Split Copy from source shard(s) to split children. * 'sourceShardNode' : Source shard worker node. @@ -648,7 +675,7 @@ CreateSplitCopyCommand(ShardInterval *sourceShardSplitInterval, appendStringInfo(splitCopyInfoArray, "]"); StringInfo splitCopyUdf = makeStringInfo(); - appendStringInfo(splitCopyUdf, "SELECT worker_split_copy(%lu, %s);", + appendStringInfo(splitCopyUdf, "SELECT pg_catalog.worker_split_copy(%lu, %s);", sourceShardSplitInterval->shardId, splitCopyInfoArray->data); @@ -673,6 +700,14 @@ CreateObjectOnPlacement(List *objectCreationCommandList, /* * Create split children intervals for a shardgroup given list of split points. + * Example: + * 'sourceColocatedShardIntervalList': Colocated shard S1[-2147483648, 2147483647] & S2[-2147483648, 2147483647] + * 'splitPointsForShard': [0] (2 way split) + * 'shardGroupSplitIntervalListList': + * [ + * [ S1_1(-2147483648, 0), S1_2(1, 2147483647) ], // Split Interval List for S1. + * [ S2_1(-2147483648, 0), S2_2(1, 2147483647) ] // Split Interval List for S2. + * ] */ static List * CreateSplitIntervalsForShardGroup(List *sourceColocatedShardIntervalList, @@ -774,10 +809,10 @@ InsertSplitChildrenShardMetadata(List *shardGroupSplitIntervalListList, 0, /* shard length (zero for HashDistributed Table) */ workerPlacementNode->groupId); - if (ShouldSyncTableMetadata(shardInterval->relationId)) - { - syncedShardList = lappend(syncedShardList, shardInterval); - } + if (ShouldSyncTableMetadata(shardInterval->relationId)) + { + syncedShardList = lappend(syncedShardList, shardInterval); + } } } @@ -817,15 +852,20 @@ CreateForeignKeyConstraints(List *shardGroupSplitIntervalListList, & referenceTableForeignConstraintList); - List *commandList = NIL; - commandList = list_concat(commandList, shardForeignConstraintCommandList); - commandList = list_concat(commandList, referenceTableForeignConstraintList); + List *constraintCommandList = NIL; + constraintCommandList = list_concat(constraintCommandList, + shardForeignConstraintCommandList); + constraintCommandList = list_concat(constraintCommandList, + referenceTableForeignConstraintList); - SendCommandListToWorkerOutsideTransaction( - workerPlacementNode->workerName, - workerPlacementNode->workerPort, - TableOwner(shardInterval->relationId), - commandList); + char *constraintCommand = NULL; + foreach_ptr(constraintCommand, constraintCommandList) + { + SendCommandToWorker( + workerPlacementNode->workerName, + workerPlacementNode->workerPort, + constraintCommand); + } } } } diff --git a/src/backend/distributed/operations/worker_shard_copy.c b/src/backend/distributed/operations/worker_shard_copy.c index ea601fba5..473f3b670 100644 --- a/src/backend/distributed/operations/worker_shard_copy.c +++ b/src/backend/distributed/operations/worker_shard_copy.c @@ -74,7 +74,6 @@ static bool CanUseLocalCopy(uint64 destinationNodeId); static StringInfo ConstructCopyStatement(List 
*destinationShardFullyQualifiedName, bool useBinaryFormat); static void WriteLocalTuple(TupleTableSlot *slot, ShardCopyDestReceiver *copyDest); -static bool ShouldSendCopyNow(StringInfo buffer); static int ReadFromLocalBufferCallback(void *outBuf, int minRead, int maxRead); static void LocalCopyToShard(ShardCopyDestReceiver *copyDest, CopyOutState localCopyOutState); @@ -88,18 +87,6 @@ CanUseLocalCopy(uint64 destinationNodeId) } -/* - * ShouldSendCopyNow returns true if the given buffer size exceeds the - * local copy buffer size threshold. - */ -static bool -ShouldSendCopyNow(StringInfo buffer) -{ - /* LocalCopyFlushThreshold is in bytes */ - return buffer->len > LocalCopyFlushThresholdByte; -} - - /* Connect to node with source shard and trigger copy start. */ static void ConnectToRemoteAndStartCopy(ShardCopyDestReceiver *copyDest) @@ -197,7 +184,7 @@ ShardCopyDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest) if (copyDest->useLocalCopy) { WriteLocalTuple(slot, copyDest); - if (ShouldSendCopyNow(copyOutState->fe_msgbuf)) + if (copyOutState->fe_msgbuf->len > LocalCopyFlushThresholdByte) { LocalCopyToShard(copyDest, copyOutState); } diff --git a/src/backend/distributed/operations/worker_split_copy_udf.c b/src/backend/distributed/operations/worker_split_copy_udf.c index 0314875b0..63f7656fd 100644 --- a/src/backend/distributed/operations/worker_split_copy_udf.c +++ b/src/backend/distributed/operations/worker_split_copy_udf.c @@ -81,13 +81,18 @@ worker_split_copy(PG_FUNCTION_ARGS) shardIntervalToSplitCopy, splitCopyInfoList); - char *sourceShardToCopyName = generate_qualified_relation_name( + Oid sourceShardToCopySchemaOId = get_rel_namespace( shardIntervalToSplitCopy->relationId); + char *sourceShardToCopySchemaName = get_namespace_name(sourceShardToCopySchemaOId); + char *sourceShardToCopyName = get_rel_name(shardIntervalToSplitCopy->relationId); AppendShardIdToName(&sourceShardToCopyName, shardIdToSplitCopy); + char *sourceShardToCopyQualifiedName = quote_qualified_identifier( + sourceShardToCopySchemaName, + sourceShardToCopyName); StringInfo selectShardQueryForCopy = makeStringInfo(); appendStringInfo(selectShardQueryForCopy, - "SELECT * FROM %s;", sourceShardToCopyName); + "SELECT * FROM %s;", sourceShardToCopyQualifiedName); ParamListInfo params = NULL; ExecuteQueryStringIntoDestReceiver(selectShardQueryForCopy->data, params, @@ -197,9 +202,9 @@ CreateShardCopyDestReceivers(EState *estate, ShardInterval *shardIntervalToSplit char *sourceShardNamePrefix = get_rel_name(shardIntervalToSplitCopy->relationId); foreach_ptr(splitCopyInfo, splitCopyInfoList) { - char *destinationShardSchemaName = get_namespace_name(get_rel_namespace( - shardIntervalToSplitCopy - ->relationId)); + Oid destinationShardSchemaOid = get_rel_namespace( + shardIntervalToSplitCopy->relationId); + char *destinationShardSchemaName = get_namespace_name(destinationShardSchemaOid); char *destinationShardNameCopy = pstrdup(sourceShardNamePrefix); AppendShardIdToName(&destinationShardNameCopy, splitCopyInfo->destinationShardId); diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index f9af95297..05cff074a 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -1462,6 +1462,17 @@ RegisterCitusConfigVariables(void) GUC_UNIT_KB | GUC_STANDARD, NULL, NULL, NULL); + DefineCustomIntVariable( + "citus.max_matview_size_to_auto_recreate", + gettext_noop("Sets the maximum size of materialized views in MB to " 
+ "automatically distribute them."), + NULL, + &MaxMatViewSizeToAutoRecreate, + 1024, -1, INT_MAX, + PGC_USERSET, + GUC_UNIT_MB | GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomIntVariable( "citus.max_rebalancer_logged_ignored_moves", gettext_noop("Sets the maximum number of ignored moves the rebalance logs"), diff --git a/src/backend/distributed/sql/citus--11.0-2--11.0-3.sql b/src/backend/distributed/sql/citus--11.0-2--11.0-3.sql new file mode 100644 index 000000000..32e9650a6 --- /dev/null +++ b/src/backend/distributed/sql/citus--11.0-2--11.0-3.sql @@ -0,0 +1 @@ +#include "udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql" diff --git a/src/backend/distributed/sql/citus--11.0-2--11.1-1.sql b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql similarity index 56% rename from src/backend/distributed/sql/citus--11.0-2--11.1-1.sql rename to src/backend/distributed/sql/citus--11.0-3--11.1-1.sql index 0b1475e1e..356605424 100644 --- a/src/backend/distributed/sql/citus--11.0-2--11.1-1.sql +++ b/src/backend/distributed/sql/citus--11.0-3--11.1-1.sql @@ -7,7 +7,12 @@ DROP FUNCTION pg_catalog.worker_merge_files_into_table(bigint, integer, text[], DROP FUNCTION pg_catalog.worker_range_partition_table(bigint, integer, text, text, oid, anyarray); DROP FUNCTION pg_catalog.worker_repartition_cleanup(bigint); -#include "../../columnar/sql/columnar--11.0-2--11.1-1.sql" -#include "udfs/citus_split_shard_by_split_points/11.0-2.sql" -#include "udfs/worker_split_copy/11.0-2.sql" +#include "../../columnar/sql/columnar--11.0-3--11.1-1.sql" + +DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8); +#include "udfs/get_all_active_transactions/11.1-1.sql" +#include "udfs/citus_split_shard_by_split_points/11.1-1.sql" +#include "udfs/worker_split_copy/11.1-1.sql" #include "udfs/worker_split_shard_replication_setup/11.0-2.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql b/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql index 7743d5179..3cbbcc883 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.0-2--11.0-1.sql @@ -1,5 +1,6 @@ #include "../udfs/citus_shards_on_worker/11.0-1.sql" #include "../udfs/citus_shard_indexes_on_worker/11.0-1.sql" +#include "../udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql" DROP FUNCTION pg_catalog.citus_disable_node(text, integer, bool); CREATE FUNCTION pg_catalog.citus_disable_node(nodename text, nodeport integer, force bool default false) @@ -15,5 +16,4 @@ DROP FUNCTION pg_catalog.citus_is_coordinator(); DROP FUNCTION pg_catalog.run_command_on_coordinator(text,boolean); DROP FUNCTION pg_catalog.start_metadata_sync_to_all_nodes(); -DROP FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(boolean); DROP PROCEDURE pg_catalog.citus_finish_citus_upgrade(); diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-3--11.0-2.sql b/src/backend/distributed/sql/downgrades/citus--11.0-3--11.0-2.sql new file mode 100644 index 000000000..d27417a60 --- /dev/null +++ b/src/backend/distributed/sql/downgrades/citus--11.0-3--11.0-2.sql @@ -0,0 +1 @@ +#include "../udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-2.sql b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql similarity index 80% rename 
from src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-2.sql rename to src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql index 0a6ec8599..f7dc5ca07 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-2.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.1-1--11.0-3.sql @@ -46,8 +46,6 @@ CREATE FUNCTION pg_catalog.worker_repartition_cleanup(bigint) STRICT AS 'MODULE_PATHNAME', $function$worker_repartition_cleanup$function$; -DROP TYPE IF EXISTS citus.split_mode; -DROP TYPE IF EXISTS citus.split_copy_info; DROP FUNCTION pg_catalog.citus_split_shard_by_split_points( shard_id bigint, split_points text[], @@ -56,5 +54,12 @@ DROP FUNCTION pg_catalog.citus_split_shard_by_split_points( DROP FUNCTION pg_catalog.worker_split_copy( source_shard_id bigint, splitCopyInfos citus.split_copy_info[]); +DROP TYPE citus.split_mode; +DROP TYPE citus.split_copy_info; -#include "../../../columnar/sql/downgrades/columnar--11.1-1--11.0-2.sql" +#include "../../../columnar/sql/downgrades/columnar--11.1-1--11.0-3.sql" + +DROP FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8); +#include "../udfs/get_all_active_transactions/11.0-1.sql" diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql new file mode 100644 index 000000000..fae94a04a --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql @@ -0,0 +1,224 @@ +-- citus_finalize_upgrade_to_citus11() is a helper UDF ensures +-- the upgrade to Citus 11 is finished successfully. Upgrade to +-- Citus 11 requires all active primary worker nodes to get the +-- metadata. And, this function's job is to sync the metadata to +-- the nodes that does not already have +-- once the function finishes without any errors and returns true +-- the cluster is ready for running distributed queries from +-- the worker nodes. When debug is enabled, the function provides +-- more information to the user. +CREATE OR REPLACE FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(enforce_version_check bool default true) + RETURNS bool + LANGUAGE plpgsql + AS $$ +BEGIN + + --------------------------------------------- + -- This script consists of N stages + -- Each step is documented, and if log level + -- is reduced to DEBUG1, each step is logged + -- as well + --------------------------------------------- + +------------------------------------------------------------------------------------------ + -- STAGE 0: Ensure no concurrent node metadata changing operation happens while this + -- script is running via acquiring a strong lock on the pg_dist_node +------------------------------------------------------------------------------------------ +BEGIN + LOCK TABLE pg_dist_node IN EXCLUSIVE MODE NOWAIT; + + EXCEPTION WHEN OTHERS THEN + RAISE 'Another node metadata changing operation is in progress, try again.'; +END; + +------------------------------------------------------------------------------------------ + -- STAGE 1: We want all the commands to run in the same transaction block. 
Without + -- sequential mode, metadata syncing cannot be done in a transaction block along with + -- other commands +------------------------------------------------------------------------------------------ + SET LOCAL citus.multi_shard_modify_mode TO 'sequential'; + +------------------------------------------------------------------------------------------ + -- STAGE 2: Ensure we have the prerequisites + -- (a) only superuser can run this script + -- (b) cannot be executed when enable_ddl_propagation is False + -- (c) can only be executed from the coordinator +------------------------------------------------------------------------------------------ +DECLARE + is_superuser_running boolean := False; + enable_ddl_prop boolean:= False; + local_group_id int := 0; +BEGIN + SELECT rolsuper INTO is_superuser_running FROM pg_roles WHERE rolname = current_user; + IF is_superuser_running IS NOT True THEN + RAISE EXCEPTION 'This operation can only be initiated by superuser'; + END IF; + + SELECT current_setting('citus.enable_ddl_propagation') INTO enable_ddl_prop; + IF enable_ddl_prop IS NOT True THEN + RAISE EXCEPTION 'This operation cannot be completed when citus.enable_ddl_propagation is False.'; + END IF; + + SELECT groupid INTO local_group_id FROM pg_dist_local_group; + + IF local_group_id != 0 THEN + RAISE EXCEPTION 'Operation is not allowed on this node. Connect to the coordinator and run it again.'; + ELSE + RAISE DEBUG 'We are on the coordinator, continue to sync metadata'; + END IF; +END; + + + ------------------------------------------------------------------------------------------ + -- STAGE 3: Ensure all primary nodes are active + ------------------------------------------------------------------------------------------ + DECLARE + primary_disabled_worker_node_count int := 0; + BEGIN + SELECT count(*) INTO primary_disabled_worker_node_count FROM pg_dist_node + WHERE groupid != 0 AND noderole = 'primary' AND NOT isactive; + + IF primary_disabled_worker_node_count != 0 THEN + RAISE EXCEPTION 'There are inactive primary worker nodes, you need to activate the nodes first.' + 'Use SELECT citus_activate_node() to activate the disabled nodes'; + ELSE + RAISE DEBUG 'There are no disabled worker nodes, continue to sync metadata'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 4: Ensure there is no connectivity issues in the cluster + ------------------------------------------------------------------------------------------ + DECLARE + all_nodes_can_connect_to_each_other boolean := False; + BEGIN + SELECT bool_and(coalesce(result, false)) INTO all_nodes_can_connect_to_each_other FROM citus_check_cluster_node_health(); + + IF all_nodes_can_connect_to_each_other != True THEN + RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' + 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'to each other. 
Use SELECT * FROM citus_check_cluster_node_health(); ' + 'to check the cluster health'; + ELSE + RAISE DEBUG 'Cluster is healthy, all nodes can connect to each other'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 5: Ensure all nodes are on the same version + ------------------------------------------------------------------------------------------ + DECLARE + coordinator_version text := ''; + worker_node_version text := ''; + worker_node_version_count int := 0; + + BEGIN + SELECT extversion INTO coordinator_version from pg_extension WHERE extname = 'citus'; + + -- first, check if all nodes have the same versions + SELECT + count(distinct result) INTO worker_node_version_count + FROM + run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'''); + + IF enforce_version_check AND worker_node_version_count = 0 THEN + RAISE DEBUG 'There are no worker nodes'; + ELSIF enforce_version_check AND worker_node_version_count != 1 THEN + RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently ' + 'some of the workers have different versions.'; + ELSE + RAISE DEBUG 'All worker nodes have the same Citus version'; + END IF; + + -- second, check if all nodes have the same versions + SELECT + result INTO worker_node_version + FROM + run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'';') + GROUP BY result; + + IF enforce_version_check AND coordinator_version != worker_node_version THEN + RAISE EXCEPTION 'All nodes should have the same Citus version installed. Currently ' + 'the coordinator has version % and the worker(s) has %', + coordinator_version, worker_node_version; + ELSE + RAISE DEBUG 'All nodes have the same Citus version'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 6: Ensure all the partitioned tables have the proper naming structure + -- As described on https://github.com/citusdata/citus/issues/4962 + -- existing indexes on partitioned distributed tables can collide + -- with the index names exists on the shards + -- luckily, we know how to fix it. + -- And, note that we should do this even if the cluster is a basic plan + -- (e.g., single node Citus) such that when cluster scaled out, everything + -- works as intended + -- And, this should be done only ONCE for a cluster as it can be a pretty + -- time consuming operation. Thus, even if the function is called multiple time, + -- we keep track of it and do not re-execute this part if not needed. + ------------------------------------------------------------------------------------------ + DECLARE + partitioned_table_exists_pre_11 boolean:=False; + BEGIN + + -- we recorded if partitioned tables exists during upgrade to Citus 11 + SELECT metadata->>'partitioned_citus_table_exists_pre_11' INTO partitioned_table_exists_pre_11 + FROM pg_dist_node_metadata; + + IF partitioned_table_exists_pre_11 IS NOT NULL AND partitioned_table_exists_pre_11 THEN + + -- this might take long depending on the number of partitions and shards... 
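A usage note for the UDF being defined here: per the checks above, it must be run by a superuser, on the coordinator, with `citus.enable_ddl_propagation` enabled, and its single optional argument controls the cross-node version check.

```sql
-- Usage sketch based on the definition above.
SELECT pg_catalog.citus_finalize_upgrade_to_citus11();
-- The node version check can be bypassed explicitly if needed:
SELECT pg_catalog.citus_finalize_upgrade_to_citus11(enforce_version_check := false);
```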
+ RAISE NOTICE 'Preparing all the existing partitioned table indexes'; + PERFORM pg_catalog.fix_all_partition_shard_index_names(); + + -- great, we are done with fixing the existing wrong index names + -- so, let's remove this + UPDATE pg_dist_node_metadata + SET metadata=jsonb_delete(metadata, 'partitioned_citus_table_exists_pre_11'); + ELSE + RAISE DEBUG 'There are no partitioned tables that should be fixed'; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 7: Return early if there are no primary worker nodes + -- We don't strictly need this step, but it gives a nicer notice message + ------------------------------------------------------------------------------------------ + DECLARE + primary_worker_node_count bigint :=0; + BEGIN + SELECT count(*) INTO primary_worker_node_count FROM pg_dist_node WHERE groupid != 0 AND noderole = 'primary'; + + IF primary_worker_node_count = 0 THEN + RAISE NOTICE 'There are no primary worker nodes, no need to sync metadata to any node'; + RETURN true; + ELSE + RAISE DEBUG 'There are % primary worker nodes, continue to sync metadata', primary_worker_node_count; + END IF; + END; + + ------------------------------------------------------------------------------------------ + -- STAGE 8: Do the actual metadata & object syncing to the worker nodes + -- For the "already synced" metadata nodes, we do not strictly need to + -- sync the objects & metadata, but there is no harm in doing it anyway + -- it'll only cost some execution time but makes sure that we have + -- consistent metadata & objects across all the nodes + ------------------------------------------------------------------------------------------ + DECLARE + BEGIN + + -- this might take long depending on the number of tables & objects ... + RAISE NOTICE 'Preparing to sync the metadata to all nodes'; + + PERFORM start_metadata_sync_to_all_nodes(); + END; + + RETURN true; +END; +$$; +COMMENT ON FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(bool) + IS 'finalizes upgrade to Citus'; + +REVOKE ALL ON FUNCTION pg_catalog.citus_finalize_upgrade_to_citus11(bool) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql index 2b4bb17f6..fae94a04a 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql @@ -120,7 +120,10 @@ END; count(distinct result) INTO worker_node_version_count FROM run_command_on_workers('SELECT extversion from pg_extension WHERE extname = ''citus'''); - IF enforce_version_check AND worker_node_version_count != 1 THEN + + IF enforce_version_check AND worker_node_version_count = 0 THEN + RAISE DEBUG 'There are no worker nodes'; + ELSIF enforce_version_check AND worker_node_version_count != 1 THEN + RAISE EXCEPTION 'All nodes should have the same Citus version installed. 
Currently ' 'some of the workers have different versions.'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.1-1.sql similarity index 100% rename from src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.0-2.sql rename to src/backend/distributed/sql/udfs/citus_split_shard_by_split_points/11.1-1.sql diff --git a/src/backend/distributed/sql/udfs/get_all_active_transactions/11.1-1.sql b/src/backend/distributed/sql/udfs/get_all_active_transactions/11.1-1.sql new file mode 100644 index 000000000..c8517cc0c --- /dev/null +++ b/src/backend/distributed/sql/udfs/get_all_active_transactions/11.1-1.sql @@ -0,0 +1,12 @@ +DROP FUNCTION IF EXISTS pg_catalog.get_all_active_transactions(); +CREATE OR REPLACE FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$get_all_active_transactions$$; + +COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, + OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, + OUT global_pid int8) +IS 'returns transaction information for all Citus initiated transactions'; diff --git a/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql b/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql index 636abb0dd..c8517cc0c 100644 --- a/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql +++ b/src/backend/distributed/sql/udfs/get_all_active_transactions/latest.sql @@ -6,7 +6,7 @@ RETURNS SETOF RECORD LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$get_all_active_transactions$$; -COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT datname text, OUT process_id int, OUT initiator_node_identifier int4, +COMMENT ON FUNCTION pg_catalog.get_all_active_transactions(OUT datid oid, OUT process_id int, OUT initiator_node_identifier int4, OUT worker_query BOOL, OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT global_pid int8) IS 'returns transaction information for all Citus initiated transactions'; diff --git a/src/backend/distributed/sql/udfs/worker_split_copy/11.0-2.sql b/src/backend/distributed/sql/udfs/worker_split_copy/11.1-1.sql similarity index 100% rename from src/backend/distributed/sql/udfs/worker_split_copy/11.0-2.sql rename to src/backend/distributed/sql/udfs/worker_split_copy/11.1-1.sql diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index e11bc5419..8a046dcd7 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -506,6 +506,11 @@ CreateFixPartitionShardIndexNamesTaskList(Oid parentRelationId, Oid partitionRel } Relation parentRelation = RelationIdGetRelation(parentRelationId); + if (!RelationIsValid(parentRelation)) + { + ereport(ERROR, (errmsg("could not open relation with OID %u", parentRelationId))); + } + List *parentIndexIdList = NIL; if (parentIndexOid != InvalidOid) diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 1db02fd3c..ad5c4eb5d 100644 --- a/src/include/distributed/commands.h +++ 
b/src/include/distributed/commands.h @@ -29,6 +29,7 @@ extern bool EnableLocalReferenceForeignKeys; extern bool EnableUnsafeTriggers; +extern int MaxMatViewSizeToAutoRecreate; extern void SwitchToSequentialAndLocalExecutionIfRelationNameTooLong(Oid relationId, char * @@ -553,7 +554,7 @@ extern void UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, bool *forceDelegation); /* vacuum.c - forward declarations */ -extern void PostprocessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand); +extern List * PostprocessVacuumStmt(Node *node, const char *vacuumCommand); /* view.c - forward declarations */ extern List * PreprocessViewStmt(Node *node, const char *queryString, @@ -642,7 +643,7 @@ extern bool RelationIdListHasReferenceTable(List *relationIdList); extern List * GetFKeyCreationCommandsForRelationIdList(List *relationIdList); extern void DropRelationForeignKeys(Oid relationId, int flags); extern void SetLocalEnableLocalReferenceForeignKeys(bool state); -extern void ExecuteAndLogUtilityCommandListInTableTypeConversion( +extern void ExecuteAndLogUtilityCommandListInTableTypeConversionViaSPI( List *utilityCommandList); extern void ExecuteAndLogUtilityCommandList(List *ddlCommandList); extern void ExecuteAndLogUtilityCommand(const char *commandString); diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 4e48b5cc0..e5b0877d7 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -305,6 +305,7 @@ extern bool GetNodeDiskSpaceStatsForConnection(MultiConnection *connection, uint64 *availableBytes, uint64 *totalBytes); extern void ExecuteQueryViaSPI(char *query, int SPIOK); +extern void ExecuteAndLogQueryViaSPI(char *query, int SPIOK, int logLevel); extern void EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId); extern void AlterSequenceType(Oid seqOid, Oid typeOid); diff --git a/src/test/regress/base_isolation_schedule b/src/test/regress/base_isolation_schedule index 7e86b998c..1fc2f7de5 100644 --- a/src/test/regress/base_isolation_schedule +++ b/src/test/regress/base_isolation_schedule @@ -1,4 +1,5 @@ # ---------- # isolation setup steps # ---------- +test: isolation_setup test: isolation_cluster_management diff --git a/src/test/regress/enterprise_isolation_logicalrep_2_schedule b/src/test/regress/enterprise_isolation_logicalrep_2_schedule index 1863d4dc5..a350ea2f1 100644 --- a/src/test/regress/enterprise_isolation_logicalrep_2_schedule +++ b/src/test/regress/enterprise_isolation_logicalrep_2_schedule @@ -1,3 +1,5 @@ +test: isolation_setup + # tests that change node metadata should precede # isolation_cluster_management such that tests # that come later can be parallelized diff --git a/src/test/regress/enterprise_isolation_schedule b/src/test/regress/enterprise_isolation_schedule index ef64eff92..641f41d4d 100644 --- a/src/test/regress/enterprise_isolation_schedule +++ b/src/test/regress/enterprise_isolation_schedule @@ -11,3 +11,5 @@ test: isolation_ref2ref_foreign_keys_enterprise test: isolation_pg_send_cancellation test: isolation_shard_move_vs_start_metadata_sync test: isolation_tenant_isolation +test: isolation_blocking_shard_split +test: isolation_blocking_shard_split_with_fkey_to_reference diff --git a/src/test/regress/expected/alter_table_set_access_method.out b/src/test/regress/expected/alter_table_set_access_method.out index da6cbd868..7165877d5 100644 --- a/src/test/regress/expected/alter_table_set_access_method.out +++ 
b/src/test/regress/expected/alter_table_set_access_method.out @@ -522,7 +522,7 @@ SELECT table_name, citus_table_type, distribution_column, shard_count, access_me (2 rows) SELECT c.relname, a.amname FROM pg_class c, pg_am a where c.relname SIMILAR TO 'table_type\D*' AND c.relnamespace = 'alter_table_set_access_method'::regnamespace AND c.relam = a.oid; - relname | amname + relname | amname --------------------------------------------------------------------- table_type_citus_local | columnar table_type_dist | columnar diff --git a/src/test/regress/expected/citus_local_tables_mx.out b/src/test/regress/expected/citus_local_tables_mx.out index d8effc8c6..8d7a3fd39 100644 --- a/src/test/regress/expected/citus_local_tables_mx.out +++ b/src/test/regress/expected/citus_local_tables_mx.out @@ -876,7 +876,29 @@ CREATE TABLE loc_tb (a int ); CREATE VIEW v100 AS SELECT * FROM loc_tb; CREATE VIEW v101 AS SELECT * FROM loc_tb JOIN ref_tb USING (a); CREATE VIEW v102 AS SELECT * FROM v101; +-- a regular matview that depends on local table +CREATE MATERIALIZED VIEW matview_101 AS SELECT * from loc_tb; +-- a matview and a view that depend on the local table + each other +CREATE VIEW v103 AS SELECT * from loc_tb; +CREATE MATERIALIZED VIEW matview_102 AS SELECT * from loc_tb JOIN v103 USING (a); +CREATE OR REPLACE VIEW v103 AS SELECT * from loc_tb JOIN matview_102 USING (a); +SET client_min_messages TO DEBUG1; +-- auto undistribute ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a); +DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v100 (a) AS SELECT loc_tb.a + FROM citus_local_tables_mx.loc_tb; ALTER VIEW citus_local_tables_mx.v100 OWNER TO postgres" +DEBUG: "view v100" has dependency to "table loc_tb" that is not in Citus' metadata +DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v101 (a) AS SELECT loc_tb.a + FROM (citus_local_tables_mx.loc_tb + JOIN citus_local_tables_mx.ref_tb USING (a)); ALTER VIEW citus_local_tables_mx.v101 OWNER TO postgres" +DEBUG: "view v101" has dependency to "table loc_tb" that is not in Citus' metadata +DEBUG: executing "CREATE MATERIALIZED VIEW citus_local_tables_mx.matview_101 USING heap AS SELECT loc_tb.a + FROM citus_local_tables_mx.loc_tb;ALTER MATERIALIZED VIEW citus_local_tables_mx.matview_101 OWNER TO postgres" +DEBUG: executing "CREATE OR REPLACE VIEW citus_local_tables_mx.v102 (a) AS SELECT v101.a + FROM citus_local_tables_mx.v101; ALTER VIEW citus_local_tables_mx.v102 OWNER TO postgres" +DEBUG: "view v102" has dependency to "table loc_tb" that is not in Citus' metadata +DEBUG: validating foreign key constraint "fkey_xxxxxxx" +SET client_min_messages TO WARNING; -- works fine select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100, citus_local_tables_mx.v101, citus_local_tables_mx.v102$$); run_command_on_workers @@ -908,6 +930,152 @@ select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100$ (localhost,57638,f,"ERROR: relation ""citus_local_tables_mx.v102"" does not exist") (2 rows) +INSERT INTO loc_tb VALUES (1), (2); +-- test a matview with columnar +CREATE MATERIALIZED VIEW matview_columnar USING COLUMNAR AS SELECT * FROM loc_tb WITH DATA; +-- cant recreate matviews, because the size limit is set to zero, by the GUC +SET citus.max_matview_size_to_auto_recreate TO 0; +SELECT citus_add_local_table_to_metadata('loc_tb', true); +ERROR: size of the materialized view matview_columnar exceeds citus.max_matview_size_to_auto_recreate (currently 0 MB) +-- remove the limit +SET 
citus.max_matview_size_to_auto_recreate TO -1; +SELECT citus_add_local_table_to_metadata('loc_tb', true); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +-- test REFRESH MAT VIEW +SELECT * FROM matview_101 ORDER BY a; + a +--------------------------------------------------------------------- +(0 rows) + +REFRESH MATERIALIZED VIEW matview_101; +SELECT * FROM matview_101 ORDER BY a; + a +--------------------------------------------------------------------- + 1 + 2 +(2 rows) + +-- verify columnar matview works on a table added to metadata +SELECT * FROM matview_columnar; + a +--------------------------------------------------------------------- +(0 rows) + +REFRESH MATERIALIZED VIEW matview_columnar; +SELECT * FROM matview_columnar ORDER BY a; + a +--------------------------------------------------------------------- + 1 + 2 +(2 rows) + +-- test with partitioned tables +SET citus.use_citus_managed_tables TO ON; +CREATE TABLE parent_1 (a INT UNIQUE) PARTITION BY RANGE(a); +SET citus.use_citus_managed_tables TO OFF; +CREATE MATERIALIZED VIEW part_matview1 as SELECT count(*) FROM parent_1 JOIN parent_1 p2 ON (true); +CREATE MATERIALIZED VIEW part_matview2 as SELECT count(*) FROM parent_1 JOIN part_matview1 on (true); +SELECT count(*) FROM citus_local_tables_mx.part_matview1 JOIN citus_local_tables_mx.part_matview2 ON (true); + count +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE TABLE parent_1_child_1 (a int); +CREATE TABLE parent_1_child_2 (a int); +-- create matviews on partition tables +CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM parent_1_child_1; +CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM parent_1_child_2; +CREATE MATERIALIZED VIEW mv3 AS SELECT parent_1_child_2.* FROM parent_1_child_2 JOIN parent_1_child_1 USING(a); +CREATE MATERIALIZED VIEW mv4 AS SELECT * FROM mv3; +alter table parent_1 attach partition parent_1_child_1 FOR VALUES FROM (0) TO (10) ; +-- all matviews work +SELECT count(*) FROM citus_local_tables_mx.mv1; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv2; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv3; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- recreate matviews and verify they still work +alter table parent_1 attach partition parent_1_child_2 FOR VALUES FROM (10) TO (20); +SELECT count(*) FROM citus_local_tables_mx.mv1; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv2; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv3; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- verify matviews work after undistributing +SELECT undistribute_table('parent_1'); + undistribute_table +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv1; + 
count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv2; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv3; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT count(*) FROM citus_local_tables_mx.mv4; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- todo: add more matview tests once 5968 and 6028 are fixed -- cleanup at exit set client_min_messages to error; DROP SCHEMA citus_local_tables_mx CASCADE; diff --git a/src/test/regress/expected/citus_split_shard_by_split_points.out b/src/test/regress/expected/citus_split_shard_by_split_points.out index dba6a3d71..8adbb5f69 100644 --- a/src/test/regress/expected/citus_split_shard_by_split_points.out +++ b/src/test/regress/expected/citus_split_shard_by_split_points.out @@ -1,133 +1,456 @@ --- Split Shards by Split Points tests. --- Setup for Test. -CREATE SCHEMA citus_split_shard_by_split_points; -SET search_path TO citus_split_shard_by_split_points; -SET citus.shard_count TO 1; +/* +Citus Shard Split Test.The test is model similar to 'shard_move_constraints'. +Here is a high level overview of test plan: + 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table. + 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors. + 3. Create Foreign key constraints between the two co-located distributed tables. + 4. Load data into the three tables. + 5. Move one of the shards for 'sensors' to test ShardMove -> Split. + 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables. + 7. Move one of the split shard to test Split -> ShardMove. + 8. Split an already split shard second time on a different schema. +*/ +CREATE SCHEMA "citus_split_test_schema"; +SET search_path TO "citus_split_test_schema"; +SET citus.next_shard_id TO 8981000; +SET citus.next_placement_id TO 8610000; +SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 1; -SET citus.next_shard_id TO 82060000; --- Create distributed table 'lineitem_streaming' -CREATE TABLE lineitem_streaming ( - l_orderkey bigint not null, - l_partkey integer not null, - l_suppkey integer not null, - l_linenumber integer not null, - l_quantity decimal(15, 2) not null, - l_extendedprice decimal(15, 2) not null, - l_discount decimal(15, 2) not null, - l_tax decimal(15, 2) not null, - l_returnflag char(1) not null, - l_linestatus char(1) not null, - l_shipdate date not null, - l_commitdate date not null, - l_receiptdate date not null, - l_shipinstruct char(25) not null, - l_shipmode char(10) not null, - l_comment varchar(44) not null); -SELECT create_distributed_table('lineitem_streaming', 'l_orderkey'); +-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. 
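+-- For orientation: the UDF exercised throughout this file takes a shard id, an array of
+-- hash-range split points, an array of target node ids (one per resulting shard), and a
+-- shard transfer mode. A sketch of the call shape, with values borrowed from the
+-- invocations made further below (illustrative only, not an additional test step):
+--   SELECT pg_catalog.citus_split_shard_by_split_points(
+--       8981000,                               -- shard to split
+--       ARRAY['-1073741824'],                  -- split point(s) within the shard's hash range
+--       ARRAY[:worker_1_node, :worker_2_node], -- owner node id for each resulting shard
+--       'blocking');                           -- shard transfer mode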
+CREATE TABLE sensors( + measureid integer, + eventdatetime date, + measure_data jsonb, + meaure_quantity decimal(15, 2), + measure_status char(1), + measure_comment varchar(44), + PRIMARY KEY (measureid, eventdatetime, measure_data)); +CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); +ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; +CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); +CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); create_distributed_table --------------------------------------------------------------------- (1 row) --- Create distributed table 'orders_streaming' -CREATE TABLE orders_streaming ( - o_orderkey bigint not null primary key, - o_custkey integer not null, - o_orderstatus char(1) not null, - o_totalprice decimal(15,2) not null, - o_orderdate date not null, - o_orderpriority char(15) not null, - o_clerk char(15) not null, - o_shippriority integer not null, - o_comment varchar(79) not null); -SELECT create_distributed_table('orders_streaming', 'o_orderkey'); +-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. +-- BEGIN: Create co-located distributed and reference tables. +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); create_distributed_table --------------------------------------------------------------------- (1 row) --- Insert data into 'lineitem_streaming' -\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|' --- Insert data into 'orders_streaming' -\COPY orders_streaming FROM STDIN WITH DELIMITER '|' --- Initial Row Count in Shard -SELECT COUNT(*) FROM orders_streaming; - count +CREATE TABLE table_with_index_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); +ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER table_with_index_rep_identity USING uqx; +SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); + create_distributed_table --------------------------------------------------------------------- - 7 + (1 row) -SELECT COUNT(*) FROM lineitem_streaming; +-- END: Create co-located distributed and reference tables. +-- BEGIN : Create Foreign key constraints. +ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- END : Create Foreign key constraints. +-- BEGIN : Load data into tables. 
+INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; +SELECT COUNT(*) FROM sensors; count --------------------------------------------------------------------- - 22 + 1001 (1 row) --- Create Foreign constraint between two shards. -ALTER TABLE lineitem_streaming ADD CONSTRAINT test_constraint - FOREIGN KEY(l_orderkey) REFERENCES orders_streaming(o_orderkey); --- Before Split, List shard and placement data. -SELECT shard.shardid, logicalrelid, shardstorage, shardminvalue, shardmaxvalue nodename, nodeport, placementid +SELECT COUNT(*) FROM reference_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT COUNT(*) FROM colocated_dist_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +-- END: Load data into tables. +-- BEGIN : Display current state. +-- TODO(niupre): Can we refactor this to be a function? +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - WHERE node.noderole = 'primary' AND (logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass) + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardstorage | shardminvalue | nodename | nodeport | placementid + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport --------------------------------------------------------------------- - 82060000 | lineitem_streaming | t | -2147483648 | 2147483647 | 57637 | 117 - 82060001 | orders_streaming | t | -2147483648 | 2147483647 | 57637 | 118 + 8981000 | sensors | -2147483648 | -1 | localhost | 57637 + 8981001 | sensors | 0 | 2147483647 | localhost | 57638 + 8981003 | colocated_dist_table | -2147483648 | -1 | localhost | 57637 + 8981004 | colocated_dist_table | 0 | 2147483647 | localhost | 57638 + 8981005 | table_with_index_rep_identity | -2147483648 | -1 | localhost | 57637 + 8981006 | table_with_index_rep_identity | 0 | 2147483647 | localhost | 57638 +(6 rows) + +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981000 | fkey_table_to_dist_8981000 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981003(measureid) +(1 row) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981000 | CREATE INDEX hash_index_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING hash (((measure_data -> 'IsFailed'::text))) 
+ sensors_8981000 | CREATE INDEX index_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (lower((measureid)::text)) + sensors_8981000 | CREATE INDEX index_with_include_on_sensors_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981000 | CREATE UNIQUE INDEX sensors_pkey_8981000 ON citus_split_test_schema.sensors_8981000 USING btree (measureid, eventdatetime, measure_data) +(4 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981005 | CREATE UNIQUE INDEX uqx_8981005 ON citus_split_test_schema.table_with_index_rep_identity_8981005 USING btree (key) +(1 row) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981000 (2 rows) --- Trigger five way way Split on Shard. +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981001 | fkey_table_to_dist_8981001 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981004(measureid) +(1 row) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981001 | CREATE INDEX hash_index_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981001 | CREATE INDEX index_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (lower((measureid)::text)) + sensors_8981001 | CREATE INDEX index_with_include_on_sensors_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981001 | CREATE UNIQUE INDEX sensors_pkey_8981001 ON citus_split_test_schema.sensors_8981001 USING btree (measureid, eventdatetime, measure_data) +(4 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981006 | CREATE UNIQUE INDEX uqx_8981006 ON citus_split_test_schema.table_with_index_rep_identity_8981006 USING btree (key) +(1 row) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981001 +(2 rows) + +-- END : Display current state +-- BEGIN : Move one shard before we split it. 
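+-- citus_move_shard_placement accepts shard_transfer_mode values of 'auto',
+-- 'force_logical' and 'block_writes'; this file exercises the latter two. A sketch of
+-- the call made just below, shown here only to spell out the arguments:
+--   SELECT citus_move_shard_placement(8981000,       -- shard to move
+--       'localhost', :worker_1_port,                 -- source node
+--       'localhost', :worker_2_port,                 -- target node
+--       shard_transfer_mode := 'force_logical');     -- logical-replication based move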
+\c - postgres - :master_port +SET search_path TO "citus_split_test_schema"; +SET citus.next_shard_id TO 8981007; +SET citus.defer_drop_after_shard_move TO OFF; +SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- END : Move one shard before we split it. +-- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset -SELECT * FROM citus_split_shard_by_split_points( - 82060000, - ARRAY['268435455', '536870911', '1073741823', '1610612735'], - ARRAY[:worker_1_node, :worker_1_node, :worker_2_node, :worker_2_node, :worker_2_node], +-- END : Set node id variables +-- BEGIN : Split two shards : One with move and One without move. +-- Perform 2 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981000, + ARRAY['-1073741824'], + ARRAY[:worker_1_node, :worker_2_node], 'blocking'); citus_split_shard_by_split_points --------------------------------------------------------------------- (1 row) - -- Row Count in Shard after Split (should be same as before) -SELECT COUNT(*) FROM orders_streaming; - count +-- Perform 3 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981001, + ARRAY['536870911', '1610612735'], + ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], + 'blocking'); + citus_split_shard_by_split_points --------------------------------------------------------------------- - 7 + (1 row) -SELECT COUNT(*) FROM lineitem_streaming; - count +-- END : Split two shards : One with move and One without move. +-- BEGIN : Move a shard post split. +SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); + citus_move_shard_placement --------------------------------------------------------------------- - 22 + (1 row) --- After Split, List shard and placement data. -SELECT shard.shardid, logicalrelid, shardstorage, shardminvalue, shardmaxvalue nodename, nodeport, placementid +-- END : Move a shard post split. +-- BEGIN : Display current state. +-- TODO(niupre): Can we refactor this to be a function? 
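+-- One possible shape for such a helper (a sketch only; the function below is
+-- hypothetical and is not created or used by this test):
+--   CREATE OR REPLACE FUNCTION show_placements(relids regclass[])
+--   RETURNS TABLE (shardid bigint, logicalrelid regclass, shardminvalue text,
+--                  shardmaxvalue text, nodename text, nodeport int)
+--   AS $fn$
+--     SELECT shard.shardid, shard.logicalrelid, shard.shardminvalue, shard.shardmaxvalue,
+--            node.nodename, node.nodeport
+--     FROM pg_dist_shard shard
+--     JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
+--     JOIN pg_dist_node node ON placement.groupid = node.groupid
+--     WHERE node.noderole = 'primary' AND shard.logicalrelid = ANY (relids)
+--     ORDER BY shard.logicalrelid, shard.shardminvalue::bigint;
+--   $fn$ LANGUAGE sql STABLE;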
+SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - WHERE node.noderole = 'primary' AND (logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass) + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) ORDER BY logicalrelid, shardminvalue::BIGINT; - shardid | logicalrelid | shardstorage | shardminvalue | nodename | nodeport | placementid + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport --------------------------------------------------------------------- - 82060002 | lineitem_streaming | t | -2147483648 | 268435455 | 57637 | 119 - 82060003 | lineitem_streaming | t | 268435456 | 536870911 | 57637 | 120 - 82060004 | lineitem_streaming | t | 536870912 | 1073741823 | 57638 | 121 - 82060005 | lineitem_streaming | t | 1073741824 | 1610612735 | 57638 | 122 - 82060006 | lineitem_streaming | t | 1610612736 | 2147483647 | 57638 | 123 - 82060007 | orders_streaming | t | -2147483648 | 268435455 | 57637 | 124 - 82060008 | orders_streaming | t | 268435456 | 536870911 | 57637 | 125 - 82060009 | orders_streaming | t | 536870912 | 1073741823 | 57638 | 126 - 82060010 | orders_streaming | t | 1073741824 | 1610612735 | 57638 | 127 - 82060011 | orders_streaming | t | 1610612736 | 2147483647 | 57638 | 128 -(10 rows) + 8981007 | sensors | -2147483648 | -1073741824 | localhost | 57638 + 8981008 | sensors | -1073741823 | -1 | localhost | 57638 + 8981013 | sensors | 0 | 536870911 | localhost | 57637 + 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 + 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 + 8981009 | colocated_dist_table | -2147483648 | -1073741824 | localhost | 57638 + 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 + 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 + 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 + 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 + 8981011 | table_with_index_rep_identity | -2147483648 | -1073741824 | localhost | 57638 + 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 + 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 + 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 + 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 +(15 rows) --- Cleanup for Test. 
-\c - - - :master_port -SET client_min_messages TO WARNING; -DROP SCHEMA citus_split_shard_by_split_points_blocking CASCADE; -ERROR: schema "citus_split_shard_by_split_points_blocking" does not exist +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981013 | fkey_table_to_dist_8981013 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981016(measureid) + sensors_8981014 | fkey_table_to_dist_8981014 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981017(measureid) +(2 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981013 | CREATE INDEX hash_index_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981013 | CREATE INDEX index_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (lower((measureid)::text)) + sensors_8981013 | CREATE INDEX index_with_include_on_sensors_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981013 | CREATE UNIQUE INDEX sensors_pkey_8981013 ON citus_split_test_schema.sensors_8981013 USING btree (measureid, eventdatetime, measure_data) + sensors_8981014 | CREATE INDEX hash_index_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981014 | CREATE INDEX index_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (lower((measureid)::text)) + sensors_8981014 | CREATE INDEX index_with_include_on_sensors_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981014 | CREATE UNIQUE INDEX sensors_pkey_8981014 ON citus_split_test_schema.sensors_8981014 USING btree (measureid, eventdatetime, measure_data) +(8 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981019 | CREATE UNIQUE INDEX uqx_8981019 ON citus_split_test_schema.table_with_index_rep_identity_8981019 USING btree (key) + table_with_index_rep_identity_8981020 | CREATE UNIQUE INDEX uqx_8981020 ON citus_split_test_schema.table_with_index_rep_identity_8981020 USING btree (key) +(2 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981013 + stats_on_sensors_8981014 +(3 rows) + +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + 
JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + relname | Constraint | Definition +--------------------------------------------------------------------- + sensors_8981007 | fkey_table_to_dist_8981007 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981009(measureid) + sensors_8981008 | fkey_table_to_dist_8981008 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981010(measureid) + sensors_8981015 | fkey_table_to_dist_8981015 | FOREIGN KEY (measureid) REFERENCES colocated_dist_table_8981018(measureid) +(3 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + sensors_8981007 | CREATE INDEX hash_index_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981007 | CREATE INDEX index_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (lower((measureid)::text)) + sensors_8981007 | CREATE INDEX index_with_include_on_sensors_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981007 | CREATE UNIQUE INDEX sensors_pkey_8981007 ON citus_split_test_schema.sensors_8981007 USING btree (measureid, eventdatetime, measure_data) + sensors_8981008 | CREATE INDEX hash_index_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981008 | CREATE INDEX index_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (lower((measureid)::text)) + sensors_8981008 | CREATE INDEX index_with_include_on_sensors_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981008 | CREATE UNIQUE INDEX sensors_pkey_8981008 ON citus_split_test_schema.sensors_8981008 USING btree (measureid, eventdatetime, measure_data) + sensors_8981015 | CREATE INDEX hash_index_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING hash (((measure_data -> 'IsFailed'::text))) + sensors_8981015 | CREATE INDEX index_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (lower((measureid)::text)) + sensors_8981015 | CREATE INDEX index_with_include_on_sensors_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (((measure_data -> 'IsFailed'::text))) INCLUDE (measure_data, eventdatetime, measure_status) + sensors_8981015 | CREATE UNIQUE INDEX sensors_pkey_8981015 ON citus_split_test_schema.sensors_8981015 USING btree (measureid, eventdatetime, measure_data) +(12 rows) + + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + tablename | indexdef +--------------------------------------------------------------------- + table_with_index_rep_identity_8981011 | CREATE UNIQUE INDEX uqx_8981011 ON citus_split_test_schema.table_with_index_rep_identity_8981011 USING btree (key) + table_with_index_rep_identity_8981012 | CREATE UNIQUE INDEX uqx_8981012 ON citus_split_test_schema.table_with_index_rep_identity_8981012 USING btree (key) + table_with_index_rep_identity_8981021 | CREATE UNIQUE INDEX uqx_8981021 ON citus_split_test_schema.table_with_index_rep_identity_8981021 USING btree (key) +(3 rows) + + SELECT stxname FROM pg_statistic_ext + WHERE 
stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + stxname +--------------------------------------------------------------------- + stats_on_sensors + stats_on_sensors_8981007 + stats_on_sensors_8981008 + stats_on_sensors_8981015 +(4 rows) + +-- END : Display current state +-- BEGIN: Should be able to change/drop constraints +\c - postgres - :master_port +SET search_path TO "citus_split_test_schema"; +ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; +ALTER INDEX index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; +DROP STATISTICS stats_on_sensors; +DROP INDEX index_on_sensors_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; +-- END: Should be able to change/drop constraints +-- BEGIN: Split second time on another schema +SET search_path TO public; +SET citus.next_shard_id TO 8981031; +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981007, + ARRAY['-2100000000'], + ARRAY[:worker_1_node, :worker_2_node], + 'blocking'); + citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +SET search_path TO "citus_split_test_schema"; +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; + shardid | logicalrelid | shardminvalue | shardmaxvalue | nodename | nodeport +--------------------------------------------------------------------- + 8981031 | sensors | -2147483648 | -2100000000 | localhost | 57637 + 8981032 | sensors | -2099999999 | -1073741824 | localhost | 57638 + 8981008 | sensors | -1073741823 | -1 | localhost | 57638 + 8981013 | sensors | 0 | 536870911 | localhost | 57637 + 8981014 | sensors | 536870912 | 1610612735 | localhost | 57637 + 8981015 | sensors | 1610612736 | 2147483647 | localhost | 57638 + 8981033 | colocated_dist_table | -2147483648 | -2100000000 | localhost | 57637 + 8981034 | colocated_dist_table | -2099999999 | -1073741824 | localhost | 57638 + 8981010 | colocated_dist_table | -1073741823 | -1 | localhost | 57638 + 8981016 | colocated_dist_table | 0 | 536870911 | localhost | 57637 + 8981017 | colocated_dist_table | 536870912 | 1610612735 | localhost | 57637 + 8981018 | colocated_dist_table | 1610612736 | 2147483647 | localhost | 57638 + 8981035 | table_with_index_rep_identity | -2147483648 | -2100000000 | localhost | 57637 + 8981036 | table_with_index_rep_identity | -2099999999 | -1073741824 | localhost | 57638 + 8981012 | table_with_index_rep_identity | -1073741823 | -1 | localhost | 57638 + 8981019 | table_with_index_rep_identity | 0 | 536870911 | localhost | 57637 + 8981020 | table_with_index_rep_identity | 536870912 | 1610612735 | localhost | 57637 + 8981021 | table_with_index_rep_identity | 1610612736 | 2147483647 | localhost | 57638 +(18 rows) + +-- END: Split second time on another schema +-- BEGIN: Validate Data Count +SELECT COUNT(*) FROM sensors; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT COUNT(*) FROM reference_table; + 
count +--------------------------------------------------------------------- + 1001 +(1 row) + +SELECT COUNT(*) FROM colocated_dist_table; + count +--------------------------------------------------------------------- + 1001 +(1 row) + +-- END: Validate Data Count +--BEGIN : Cleanup +\c - postgres - :master_port +DROP SCHEMA "citus_split_test_schema" CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table citus_split_test_schema.sensors +drop cascades to table citus_split_test_schema.reference_table +drop cascades to table citus_split_test_schema.colocated_dist_table +drop cascades to table citus_split_test_schema.table_with_index_rep_identity +--END : Cleanup diff --git a/src/test/regress/expected/index_create.out b/src/test/regress/expected/index_create.out index 6e2876fa4..4d3455ce5 100644 --- a/src/test/regress/expected/index_create.out +++ b/src/test/regress/expected/index_create.out @@ -38,3 +38,73 @@ ALTER INDEX idx2 ALTER COLUMN 1 SET STATISTICS 1000; -- test reindex REINDEX INDEX idx1; ALTER TABLE test_tbl REPLICA IDENTITY USING INDEX a_index; +-- postgres allows ALTER INDEX rename on tables, and so Citus.. +-- and, also ALTER TABLE rename on indexes.. +CREATE TABLE alter_idx_rename_test (a INT); +CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a); +CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a); +CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a); +BEGIN; +-- rename index/table with weird syntax +ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2; +ALTER TABLE alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2; +ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2; +ALTER TABLE alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2; +-- also, rename index/table with proper syntax +ALTER INDEX alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3; +ALTER TABLE alter_idx_rename_test_2 RENAME TO alter_idx_rename_test_3; +ALTER INDEX alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3; +ALTER TABLE alter_idx_rename_test_parted_2 RENAME TO alter_idx_rename_test_parted_3; +SELECT 'alter_idx_rename_test_3'::regclass, 'alter_idx_rename_test_idx_3'::regclass; + regclass | regclass +--------------------------------------------------------------------- + alter_idx_rename_test_3 | alter_idx_rename_test_idx_3 +(1 row) + +SELECT 'alter_idx_rename_test_parted_3'::regclass, 'alter_idx_rename_test_parted_idx_3'::regclass; + regclass | regclass +--------------------------------------------------------------------- + alter_idx_rename_test_parted_3 | alter_idx_rename_test_parted_idx_3 +(1 row) + +ROLLBACK; +-- now, on distributed tables +SELECT create_distributed_table('alter_idx_rename_test', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('alter_idx_rename_test_parted', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- rename index/table with weird syntax +ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2; +ALTER TABLE alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2; +ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2; +ALTER TABLE alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2; +-- also, rename index/table with 
proper syntax +ALTER INDEX alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3; +ALTER TABLE alter_idx_rename_test_2 RENAME TO alter_idx_rename_test_3; +ALTER INDEX alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3; +ALTER TABLE alter_idx_rename_test_parted_2 RENAME TO alter_idx_rename_test_parted_3; +SELECT 'alter_idx_rename_test_3'::regclass, 'alter_idx_rename_test_idx_3'::regclass; + regclass | regclass +--------------------------------------------------------------------- + alter_idx_rename_test_3 | alter_idx_rename_test_idx_3 +(1 row) + +SELECT 'alter_idx_rename_test_parted_3'::regclass, 'alter_idx_rename_test_parted_idx_3'::regclass; + regclass | regclass +--------------------------------------------------------------------- + alter_idx_rename_test_parted_3 | alter_idx_rename_test_parted_idx_3 +(1 row) + +ALTER INDEX alter_idx_rename_test_idx_3 RENAME TO alter_idx_rename_test_idx_4; +DROP INDEX alter_idx_rename_test_idx_4; +DROP TABLE alter_idx_rename_test_3; +DROP INDEX alter_idx_rename_test_parted_idx_3; +DROP TABLE alter_idx_rename_test_parted_3; \ No newline at end of file diff --git a/src/test/regress/expected/isolation_acquire_distributed_locks.out b/src/test/regress/expected/isolation_acquire_distributed_locks.out index ed51f1da6..86abfc64e 100644 --- a/src/test/regress/expected/isolation_acquire_distributed_locks.out +++ b/src/test/regress/expected/isolation_acquire_distributed_locks.out @@ -51,7 +51,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -117,7 +117,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -174,7 +174,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -271,7 +271,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -336,7 +336,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -398,7 +398,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -455,7 +455,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -512,7 +512,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -569,7 +569,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -626,7 +626,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -683,7 +683,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node 
--------------------------------------------------------------------- (1 row) @@ -740,7 +740,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -797,7 +797,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -854,7 +854,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -911,7 +911,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -968,7 +968,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -1024,7 +1024,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -1081,7 +1081,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) @@ -1184,7 +1184,7 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func +citus_remove_node --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/isolation_blocking_move_multi_shard_commands_on_mx.out b/src/test/regress/expected/isolation_blocking_move_multi_shard_commands_on_mx.out index 83da032b8..ff93f37b2 100644 --- a/src/test/regress/expected/isolation_blocking_move_multi_shard_commands_on_mx.out +++ b/src/test/regress/expected/isolation_blocking_move_multi_shard_commands_on_mx.out @@ -73,11 +73,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -155,11 +150,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -235,11 +225,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -307,8 +292,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_blocking_move_single_shard_commands_on_mx.out 
b/src/test/regress/expected/isolation_blocking_move_single_shard_commands_on_mx.out index ad6c17c7f..0162815b8 100644 --- a/src/test/regress/expected/isolation_blocking_move_single_shard_commands_on_mx.out +++ b/src/test/regress/expected/isolation_blocking_move_single_shard_commands_on_mx.out @@ -72,11 +72,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -153,11 +148,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -233,11 +223,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -305,11 +290,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -377,8 +357,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_blocking_shard_split.out b/src/test/regress/expected/isolation_blocking_shard_split.out new file mode 100644 index 000000000..ff3c250fd --- /dev/null +++ b/src/test/regress/expected/isolation_blocking_shard_split.out @@ -0,0 +1,951 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE to_split_table SET value = 111 WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-update: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM to_split_table WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-delete: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +step s2-commit: + COMMIT; + +step s1-insert: <... completed> +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. 
+ TRUNCATE to_split_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE to_split_table SET value = 111 WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-insert s1-begin s1-select s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM to_split_table WHERE id = 123456789; + +step s2-commit: + COMMIT; + +step s1-delete: <... 
completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 1 + 57638|1500004|t | 0 +(3 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +step s2-commit: + COMMIT; + +step s1-insert: <... 
completed> +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... completed> +ERROR: could not find valid entry for shard xxxxx +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s1-load-cache s1-insert s1-begin s1-blocking-shard-split s2-blocking-shard-split s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. 
+ SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500001, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +step s1-commit: + COMMIT; + +step s2-blocking-shard-split: <... completed> +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500003|t | 0 + 57637|1500005|t | 1 + 57638|1500004|t | 0 + 57638|1500006|t | 0 +(4 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-insert s1-begin s1-blocking-shard-split s2-blocking-shard-split s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + -- Id '123456789' maps to shard xxxxx. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + INSERT INTO to_split_table VALUES (123456789, 1); + +get_shard_id_for_distribution_column +--------------------------------------------------------------------- + 1500002 +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500001, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +step s1-commit: + COMMIT; + +step s2-blocking-shard-split: <... 
completed> +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500003|t | 0 + 57637|1500005|t | 1 + 57638|1500004|t | 0 + 57638|1500006|t | 0 +(4 rows) + + id|value +--------------------------------------------------------------------- +123456789| 1 +(1 row) + + +starting permutation: s1-load-cache s1-begin s1-select s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-load-cache: + -- Indirect way to load cache. + TRUNCATE to_split_table; + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-ddl: + CREATE INDEX test_table_index ON to_split_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + +step s2-print-index-count: + SELECT + nodeport, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 1 + 57637|t | 1 + 57638|t | 1 +(3 rows) + + +starting permutation: s1-begin s1-select s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster s2-print-index-count +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: + BEGIN; + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; + +step s1-select: + SELECT count(*) FROM to_split_table WHERE id = 123456789; + +count +--------------------------------------------------------------------- + 0 +(1 row) + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-ddl: + CREATE INDEX test_table_index ON to_split_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500001|t | 0 + 57637|1500003|t | 0 + 57638|1500004|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + +step s2-print-index-count: + SELECT + nodeport, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; + +nodeport|success|result +--------------------------------------------------------------------- + 57637|t | 1 + 57637|t | 1 + 57638|t | 1 +(3 rows) + diff --git a/src/test/regress/expected/isolation_blocking_shard_split_with_fkey_to_reference.out b/src/test/regress/expected/isolation_blocking_shard_split_with_fkey_to_reference.out new file mode 100644 index 000000000..410b9c2a0 --- /dev/null +++ b/src/test/regress/expected/isolation_blocking_shard_split_with_fkey_to_reference.out @@ -0,0 +1,301 @@ +Parsed test spec with 2 sessions + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-delete s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-delete: + DELETE FROM reference_table WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-delete: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-update s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-update: + UPDATE reference_table SET value = 5 WHERE id = 5; + +step s2-commit: + COMMIT; + +step s1-update: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-insert s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-insert: + INSERT INTO reference_table VALUES (5, 10); + +step s2-commit: + COMMIT; + +step s1-insert: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-copy s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-copy: + COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; + +step s2-commit: + COMMIT; + +step s1-copy: <... 
completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + + +starting permutation: s2-add-fkey s1-begin s2-begin s2-blocking-shard-split s1-ddl s2-commit s1-commit s2-print-cluster +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s2-add-fkey: + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); + +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s2-blocking-shard-split: + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); + +citus_split_shard_by_split_points +--------------------------------------------------------------------- + +(1 row) + +step s1-ddl: + CREATE INDEX reference_table_index ON reference_table(id); + +step s2-commit: + COMMIT; + +step s1-ddl: <... completed> +step s1-commit: + COMMIT; + +step s2-print-cluster: + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; + +nodeport|shardid|success|result +--------------------------------------------------------------------- + 57637|1500004|t | 0 + 57638|1500003|t | 0 + 57638|1500005|t | 0 +(3 rows) + +id|value +--------------------------------------------------------------------- +(0 rows) + diff --git a/src/test/regress/expected/isolation_concurrent_dml.out b/src/test/regress/expected/isolation_concurrent_dml.out index eca23913e..f6b18d709 100644 --- a/src/test/regress/expected/isolation_concurrent_dml.out +++ b/src/test/regress/expected/isolation_concurrent_dml.out @@ -19,11 +19,6 @@ step s1-commit: COMMIT; step s2-update: <... completed> -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s2-update create_distributed_table @@ -37,11 +32,6 @@ step s1-insert: step s2-update: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-multi-insert s2-update s1-commit create_distributed_table @@ -62,11 +52,6 @@ step s1-commit: COMMIT; step s2-update: <... 
completed> -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-multi-insert s2-multi-insert-overlap s1-commit create_distributed_table @@ -86,11 +71,6 @@ step s2-multi-insert-overlap: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s1-multi-insert s2-multi-insert s1-commit s2-commit create_distributed_table @@ -116,8 +96,3 @@ step s1-commit: step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out index ef2fb5661..8f60ccae9 100644 --- a/src/test/regress/expected/isolation_copy_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_copy_vs_all_on_mx.out @@ -89,11 +89,6 @@ count 11 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s3-select-count step s1-start-session-level-connection: @@ -150,11 +145,6 @@ step s3-select-count: SELECT COUNT(*) FROM copy_table; ERROR: relation "copy_table" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -245,8 +235,3 @@ count 8 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_ddl_vs_all.out b/src/test/regress/expected/isolation_ddl_vs_all.out index 6d4aa5811..7e040fccc 100644 --- a/src/test/regress/expected/isolation_ddl_vs_all.out +++ b/src/test/regress/expected/isolation_ddl_vs_all.out @@ -22,11 +22,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table @@ -48,11 +43,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-add-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table @@ -82,11 +72,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-rename-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table @@ -116,11 +101,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 
row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table @@ -150,11 +130,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table @@ -182,11 +157,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -209,11 +179,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -236,11 +201,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table @@ -270,11 +230,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table @@ -302,11 +257,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -329,11 +279,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -357,11 +302,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-table-size s1-commit s2-commit s1-show-indexes create_distributed_table @@ -388,11 +328,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-master-modify-multiple-shards s1-commit s2-commit s1-show-indexes create_distributed_table @@ -415,11 +350,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting 
permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-create-index s2-distribute-table s1-commit s2-commit s1-show-indexes create_distributed_table @@ -449,11 +379,6 @@ run_command_on_workers (localhost,57638,t,4) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table @@ -481,11 +406,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table @@ -508,11 +428,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-add-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table @@ -542,11 +457,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table @@ -574,11 +484,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table @@ -601,11 +506,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table @@ -635,11 +535,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table @@ -666,11 +561,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table @@ -693,11 +583,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table @@ -727,11 +612,6 @@ run_command_on_workers (localhost,57638,t,4) (2 rows) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-table-size s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table @@ -756,11 +636,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table @@ -781,11 +656,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table @@ -813,11 +683,6 @@ run_command_on_workers (localhost,57638,t,4) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -844,11 +709,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -871,11 +731,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -905,11 +760,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -936,11 +786,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -963,11 +808,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table @@ -997,8 +837,3 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_delete_vs_all.out b/src/test/regress/expected/isolation_delete_vs_all.out index 4492451fe..aba31313a 100644 --- 
a/src/test/regress/expected/isolation_delete_vs_all.out +++ b/src/test/regress/expected/isolation_delete_vs_all.out @@ -20,11 +20,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-truncate s1-commit s2-commit s1-select-count create_distributed_table @@ -46,11 +41,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-drop s1-commit s2-commit s1-select-count create_distributed_table @@ -68,11 +58,6 @@ step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -101,11 +86,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-delete s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -135,11 +115,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-delete s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -166,11 +141,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -199,11 +169,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-delete s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -233,11 +198,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -266,11 +226,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-table-size s1-commit s2-commit s1-select-count create_distributed_table @@ -296,11 +251,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-delete s2-distribute-table s1-commit 
s2-commit s1-select-count create_distributed_table @@ -329,11 +279,6 @@ count 8 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-delete s1-commit s2-commit s1-select-count create_distributed_table @@ -355,11 +300,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-delete s1-commit s2-commit s1-select-count create_distributed_table @@ -378,11 +318,6 @@ ERROR: relation "delete_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -411,11 +346,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -445,11 +375,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -478,11 +403,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -512,11 +432,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -545,11 +460,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-delete s1-commit s2-commit s1-select-count create_distributed_table @@ -575,11 +485,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-delete s1-commit s2-commit s1-select-count create_distributed_table @@ -608,8 +513,3 @@ count 8 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out index e3704e66c..ede303225 100644 --- 
a/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out +++ b/src/test/regress/expected/isolation_dis2ref_foreign_keys_on_mx.out @@ -99,11 +99,6 @@ id|value 2| 2 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: @@ -200,11 +195,6 @@ id|value 2| 2 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: @@ -306,11 +296,6 @@ id|value 2| 2 (4 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: @@ -410,11 +395,6 @@ id|value 2| 2 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-rollback-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: @@ -515,11 +495,6 @@ id|value 2| 2 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: @@ -617,11 +592,6 @@ id|value --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-select-for-udpate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-display step s1-start-session-level-connection: @@ -719,8 +689,3 @@ id|value 2| 2 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out index 38e185202..0b9042c1c 100644 --- a/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out +++ b/src/test/regress/expected/isolation_drop_alter_index_select_for_update_on_mx.out @@ -85,11 +85,6 @@ count 6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-index 
s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit s2-commit-worker s2-stop-connection step s1-begin: @@ -141,11 +136,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -229,11 +219,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-flaky-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection step s1-start-session-level-connection: @@ -279,8 +264,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_drop_vs_all.out b/src/test/regress/expected/isolation_drop_vs_all.out index e4cfe791a..a6ec239fe 100644 --- a/src/test/regress/expected/isolation_drop_vs_all.out +++ b/src/test/regress/expected/isolation_drop_vs_all.out @@ -18,11 +18,6 @@ ERROR: table "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop-schema s2-drop-schema s1-commit s2-commit s1-select-count create_distributed_table @@ -42,11 +37,6 @@ ERROR: schema "drop_tests" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop-schema s2-drop-schema-2 s1-commit s2-commit s1-select-count create_distributed_table @@ -64,11 +54,6 @@ step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -95,11 +80,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -127,11 +107,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ 
-156,11 +131,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -187,11 +157,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -219,11 +184,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -250,11 +210,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count create_distributed_table @@ -274,11 +229,6 @@ ERROR: could not compute table size: relation does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s2-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table @@ -300,11 +250,6 @@ ERROR: could not create distributed table: relation does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -330,11 +275,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -361,11 +301,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -391,11 +326,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count 
s1-show-columns create_distributed_table @@ -422,11 +352,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -452,11 +377,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s2-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count create_distributed_table @@ -479,11 +399,6 @@ step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s2-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count create_distributed_table @@ -509,8 +424,3 @@ step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_dump_global_wait_edges.out b/src/test/regress/expected/isolation_dump_global_wait_edges.out index 046025599..0086e488a 100644 --- a/src/test/regress/expected/isolation_dump_global_wait_edges.out +++ b/src/test/regress/expected/isolation_dump_global_wait_edges.out @@ -56,11 +56,6 @@ step s2-update: <... completed> step s2-abort: ABORT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort step s1-begin: @@ -137,8 +132,3 @@ step s3-update: <... 
completed> step s3-abort: ABORT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_hash_copy_vs_all.out b/src/test/regress/expected/isolation_hash_copy_vs_all.out index bbf9e9c6b..4c734925c 100644 --- a/src/test/regress/expected/isolation_hash_copy_vs_all.out +++ b/src/test/regress/expected/isolation_hash_copy_vs_all.out @@ -17,11 +17,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table @@ -45,11 +40,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -77,11 +67,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count create_distributed_table @@ -93,8 +78,8 @@ step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s2-adaptive-select: - SET citus.enable_repartition_joins TO ON; - SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.enable_repartition_joins TO ON; + SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id|data|int_data|id|data|int_data --------------------------------------------------------------------- @@ -112,11 +97,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table @@ -135,11 +115,6 @@ count 11 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table @@ -158,11 +133,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table @@ -181,11 +151,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table @@ -204,11 +169,6 @@ count 9 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table @@ -228,11 +188,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table @@ -248,11 +203,6 @@ step 
s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; ERROR: relation "hash_copy" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -279,11 +229,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -311,11 +256,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-flaky-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -341,11 +281,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -372,11 +307,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -404,11 +334,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -435,11 +360,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table @@ -463,11 +383,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table @@ -486,11 +401,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table @@ -515,11 +425,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table @@ -546,11 +451,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-recreate-with-replication-2 
s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table @@ -559,10 +459,10 @@ create_distributed_table (1 row) step s1-recreate-with-replication-2: - DROP TABLE hash_copy; - SET citus.shard_replication_factor TO 2; - CREATE TABLE hash_copy(id integer, data text, int_data int); - SELECT create_distributed_table('hash_copy', 'id'); + DROP TABLE hash_copy; + SET citus.shard_replication_factor TO 2; + CREATE TABLE hash_copy(id integer, data text, int_data int); + SELECT create_distributed_table('hash_copy', 'id'); create_distributed_table --------------------------------------------------------------------- @@ -581,11 +481,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table @@ -594,10 +489,10 @@ create_distributed_table (1 row) step s1-recreate-with-replication-2: - DROP TABLE hash_copy; - SET citus.shard_replication_factor TO 2; - CREATE TABLE hash_copy(id integer, data text, int_data int); - SELECT create_distributed_table('hash_copy', 'id'); + DROP TABLE hash_copy; + SET citus.shard_replication_factor TO 2; + CREATE TABLE hash_copy(id integer, data text, int_data int); + SELECT create_distributed_table('hash_copy', 'id'); create_distributed_table --------------------------------------------------------------------- @@ -616,11 +511,6 @@ count 9 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table @@ -629,10 +519,10 @@ create_distributed_table (1 row) step s1-recreate-with-replication-2: - DROP TABLE hash_copy; - SET citus.shard_replication_factor TO 2; - CREATE TABLE hash_copy(id integer, data text, int_data int); - SELECT create_distributed_table('hash_copy', 'id'); + DROP TABLE hash_copy; + SET citus.shard_replication_factor TO 2; + CREATE TABLE hash_copy(id integer, data text, int_data int); + SELECT create_distributed_table('hash_copy', 'id'); create_distributed_table --------------------------------------------------------------------- @@ -651,11 +541,6 @@ count 20 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-recreate-with-replication-2 s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table @@ -664,10 +549,10 @@ create_distributed_table (1 row) step s1-recreate-with-replication-2: - DROP TABLE hash_copy; - SET citus.shard_replication_factor TO 2; - CREATE TABLE hash_copy(id integer, data text, int_data int); - SELECT create_distributed_table('hash_copy', 'id'); + DROP TABLE hash_copy; + SET citus.shard_replication_factor TO 2; + CREATE TABLE hash_copy(id integer, data text, int_data int); + SELECT create_distributed_table('hash_copy', 'id'); create_distributed_table --------------------------------------------------------------------- @@ -686,11 +571,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table @@ -714,11 +594,6 @@ count 10 (1 row) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table @@ -746,11 +621,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count create_distributed_table @@ -761,8 +631,8 @@ create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-adaptive-select: - SET citus.enable_repartition_joins TO ON; - SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.enable_repartition_joins TO ON; + SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id|data|int_data|id|data|int_data --------------------------------------------------------------------- @@ -781,11 +651,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table @@ -804,11 +669,6 @@ count 11 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table @@ -827,11 +687,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table @@ -850,11 +705,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table @@ -873,11 +723,6 @@ count 9 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table @@ -897,11 +742,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table @@ -918,11 +758,6 @@ step s2-copy: <... 
completed> ERROR: relation "hash_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM hash_copy; ERROR: relation "hash_copy" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -949,11 +784,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -981,11 +811,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1013,11 +838,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1045,11 +865,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1076,11 +891,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table @@ -1104,11 +914,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table @@ -1127,11 +932,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table @@ -1157,11 +957,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table @@ -1188,8 +983,3 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_insert_select_conflict.out b/src/test/regress/expected/isolation_insert_select_conflict.out index 2a375574a..9c5411727 100644 --- a/src/test/regress/expected/isolation_insert_select_conflict.out +++ b/src/test/regress/expected/isolation_insert_select_conflict.out @@ -7,21 +7,21 @@ create_distributed_table (1 row) step s1-begin: - SET citus.shard_replication_factor to 
1; - BEGIN; + SET citus.shard_replication_factor to 1; + BEGIN; step s1-insert-into-select-conflict-update: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; col_1|col_2 --------------------------------------------------------------------- @@ -33,22 +33,17 @@ col_1|col_2 (5 rows) step s2-begin: - BEGIN; + BEGIN; step s2-update: - UPDATE target_table SET col_2 = 5; + UPDATE target_table SET col_2 = 5; step s1-commit: - COMMIT; + COMMIT; step s2-update: <... completed> step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-delete s1-commit s2-commit @@ -58,39 +53,34 @@ create_distributed_table (1 row) step s1-begin: - SET citus.shard_replication_factor to 1; - BEGIN; + SET citus.shard_replication_factor to 1; + BEGIN; step s1-insert-into-select-conflict-do-nothing: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT DO NOTHING; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT DO NOTHING; step s2-begin: - BEGIN; + BEGIN; step s2-delete: - DELETE FROM target_table; + DELETE FROM target_table; step s1-commit: - COMMIT; + COMMIT; step s2-delete: <... completed> step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-do-nothing s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit @@ -100,40 +90,40 @@ create_distributed_table (1 row) step s1-begin: - SET citus.shard_replication_factor to 1; - BEGIN; + SET citus.shard_replication_factor to 1; + BEGIN; step s1-insert-into-select-conflict-do-nothing: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT DO NOTHING; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT DO NOTHING; step s2-begin: - BEGIN; + BEGIN; step s2-insert-into-select-conflict-update: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; step s1-commit: - COMMIT; + COMMIT; step s2-insert-into-select-conflict-update: <... 
completed> col_1|col_2 @@ -146,12 +136,7 @@ col_1|col_2 (5 rows) step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-update s1-commit s2-commit @@ -161,21 +146,21 @@ create_distributed_table (1 row) step s1-begin: - SET citus.shard_replication_factor to 1; - BEGIN; + SET citus.shard_replication_factor to 1; + BEGIN; step s1-insert-into-select-conflict-update: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; col_1|col_2 --------------------------------------------------------------------- @@ -187,23 +172,23 @@ col_1|col_2 (5 rows) step s2-begin: - BEGIN; + BEGIN; step s2-insert-into-select-conflict-update: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; step s1-commit: - COMMIT; + COMMIT; step s2-insert-into-select-conflict-update: <... completed> col_1|col_2 @@ -216,12 +201,7 @@ col_1|col_2 (5 rows) step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; starting permutation: s1-begin s1-insert-into-select-conflict-update s2-begin s2-insert-into-select-conflict-do-nothing s1-commit s2-commit @@ -231,21 +211,21 @@ create_distributed_table (1 row) step s1-begin: - SET citus.shard_replication_factor to 1; - BEGIN; + SET citus.shard_replication_factor to 1; + BEGIN; step s1-insert-into-select-conflict-update: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; col_1|col_2 --------------------------------------------------------------------- @@ -257,32 +237,27 @@ col_1|col_2 (5 rows) step s2-begin: - BEGIN; + BEGIN; step s2-insert-into-select-conflict-do-nothing: - INSERT INTO target_table - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT DO NOTHING; + INSERT INTO target_table + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT DO NOTHING; step s1-commit: - COMMIT; + COMMIT; step s2-insert-into-select-conflict-do-nothing: <... 
completed> step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; starting permutation: s1-begin-replication-factor-2 s1-insert-into-select-conflict-update-replication-factor-2 s2-begin-replication-factor-2 s2-insert-into-select-conflict-update-replication-factor-2 s1-commit s2-commit @@ -292,21 +267,21 @@ create_distributed_table (1 row) step s1-begin-replication-factor-2: - SET citus.shard_replication_factor to 2; - BEGIN; + SET citus.shard_replication_factor to 2; + BEGIN; step s1-insert-into-select-conflict-update-replication-factor-2: - INSERT INTO target_table_2 - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; + INSERT INTO target_table_2 + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; col_1|col_2|col_3 --------------------------------------------------------------------- @@ -318,24 +293,24 @@ col_1|col_2|col_3 (5 rows) step s2-begin-replication-factor-2: - SET citus.shard_replication_factor to 2; - BEGIN; + SET citus.shard_replication_factor to 2; + BEGIN; step s2-insert-into-select-conflict-update-replication-factor-2: - INSERT INTO target_table_2 - SELECT - col_1, col_2 - FROM ( - SELECT - col_1, col_2, col_3 - FROM - source_table - LIMIT 5 - ) as foo - ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; + INSERT INTO target_table_2 + SELECT + col_1, col_2 + FROM ( + SELECT + col_1, col_2, col_3 + FROM + source_table + LIMIT 5 + ) as foo + ON CONFLICT(col_1) DO UPDATE SET col_2 = EXCLUDED.col_2 RETURNING *; step s1-commit: - COMMIT; + COMMIT; step s2-insert-into-select-conflict-update-replication-factor-2: <... 
completed> col_1|col_2|col_3 @@ -348,10 +323,5 @@ col_1|col_2|col_3 (5 rows) step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; diff --git a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out index 91a487c8a..82e202aee 100644 --- a/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_insert_select_vs_all_on_mx.out @@ -89,11 +89,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-colocated-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -184,11 +179,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -279,11 +269,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert-select-via-coordinator s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -374,11 +359,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -469,11 +449,6 @@ count 12 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -564,11 +539,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -659,11 +629,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker 
s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -754,11 +719,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -849,11 +809,6 @@ count 13 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -944,11 +899,6 @@ count 13 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -1013,11 +963,6 @@ step s3-select-count: SELECT COUNT(*) FROM dist_table; ERROR: relation "dist_table" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-begin s2-coordinator-drop s1-commit-worker s2-commit s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -1082,11 +1027,6 @@ step s3-select-count: SELECT COUNT(*) FROM dist_table; ERROR: relation "dist_table" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-colocated-insert-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -1177,11 +1117,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select-via-coordinator s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -1272,8 +1207,3 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out index 8f4bb9cbb..cba634e63 100644 --- a/src/test/regress/expected/isolation_insert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_insert_vs_all_on_mx.out @@ -89,11 +89,6 @@ stop_session_level_connection_to_node (1 row) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -184,11 +179,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -279,11 +269,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-insert-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -374,11 +359,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -469,11 +449,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -564,11 +539,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -659,11 +629,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-multi-row s2-start-session-level-connection s2-begin-on-worker s2-update-multi-row s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -754,11 +719,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker 
s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -849,11 +809,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -945,11 +900,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s3-select-count s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -1040,8 +990,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_logical_replication_multi_shard_commands_on_mx.out b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands_on_mx.out index a5eb30520..1189b0c40 100644 --- a/src/test/regress/expected/isolation_logical_replication_multi_shard_commands_on_mx.out +++ b/src/test/regress/expected/isolation_logical_replication_multi_shard_commands_on_mx.out @@ -89,11 +89,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-update s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -187,11 +182,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-delete s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -283,11 +273,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -372,11 +357,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-start-session-level-connection s2-begin-on-worker s2-insert s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-begin: @@ -451,11 +431,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - 
starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -533,11 +508,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -613,11 +583,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -685,8 +650,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_logical_replication_single_shard_commands_on_mx.out b/src/test/regress/expected/isolation_logical_replication_single_shard_commands_on_mx.out index 43ee52eeb..9e55d42ca 100644 --- a/src/test/regress/expected/isolation_logical_replication_single_shard_commands_on_mx.out +++ b/src/test/regress/expected/isolation_logical_replication_single_shard_commands_on_mx.out @@ -88,11 +88,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-update s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -185,11 +180,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-delete s2-commit-worker s3-release-advisory-lock s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -281,11 +271,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -370,11 +355,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s3-acquire-advisory-lock s1-begin s1-move-placement s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s2-commit-worker s3-release-advisory-lock s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -459,11 +439,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-start-session-level-connection s2-begin-on-worker s2-insert s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-begin: @@ -537,11 +512,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-update s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -618,11 +588,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-delete s1-move-placement s2-commit-worker s1-commit s1-select s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -698,11 +663,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -770,11 +730,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-insert s1-begin s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-move-placement s2-commit-worker s1-commit s1-get-shard-distribution s2-stop-connection step s1-insert: @@ -843,8 +798,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_metadata_sync_deadlock.out b/src/test/regress/expected/isolation_metadata_sync_deadlock.out index 2ee9a986a..42951eee5 100644 --- a/src/test/regress/expected/isolation_metadata_sync_deadlock.out +++ b/src/test/regress/expected/isolation_metadata_sync_deadlock.out @@ -117,8 +117,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out b/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out index 5e6f9fdba..15328aab6 100644 --- a/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out +++ b/src/test/regress/expected/isolation_modify_with_subquery_vs_dml.out @@ -5,7 +5,7 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; @@ -14,76 +14,61 @@ step s1-insert_to_events_test_table: INSERT INTO events_test_table VALUES(4,6,8,10); step s2-commit: - COMMIT; + COMMIT; step s1-insert_to_events_test_table: <... 
completed> step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-update_events_test_table s2-commit s1-commit step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; step s1-update_events_test_table: - UPDATE users_test_table SET value_1 = 3; + UPDATE users_test_table SET value_1 = 3; step s2-commit: - COMMIT; + COMMIT; step s1-update_events_test_table: <... completed> step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-modify_with_subquery_v1 s1-delete_events_test_table s2-commit s1-commit step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; step s1-delete_events_test_table: - DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; + DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; step s2-commit: - COMMIT; + COMMIT; step s1-delete_events_test_table: <... completed> step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s1-insert_to_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-insert_to_events_test_table: INSERT INTO events_test_table VALUES(4,6,8,10); @@ -96,12 +81,7 @@ step s1-commit: step s2-modify_with_subquery_v1: <... completed> step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; starting permutation: s1-begin s2-begin s1-update_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit @@ -109,10 +89,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-update_events_test_table: - UPDATE users_test_table SET value_1 = 3; + UPDATE users_test_table SET value_1 = 3; step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; @@ -122,12 +102,7 @@ step s1-commit: step s2-modify_with_subquery_v1: <... completed> step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; starting permutation: s1-begin s2-begin s1-delete_events_test_table s2-modify_with_subquery_v1 s1-commit s2-commit @@ -135,10 +110,10 @@ step s1-begin: BEGIN; step s2-begin: - BEGIN; + BEGIN; step s1-delete_events_test_table: - DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; + DELETE FROM events_test_table WHERE user_id = 1 or user_id = 3; step s2-modify_with_subquery_v1: UPDATE users_test_table SET value_2 = 5 FROM events_test_table WHERE users_test_table.user_id = events_test_table.user_id; @@ -148,10 +123,5 @@ step s1-commit: step s2-modify_with_subquery_v1: <... 
completed> step s2-commit: - COMMIT; - -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) + COMMIT; diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out index c45405fdb..6c63be9c1 100644 --- a/src/test/regress/expected/isolation_ref2ref_foreign_keys.out +++ b/src/test/regress/expected/isolation_ref2ref_foreign_keys.out @@ -38,11 +38,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-delete-table-1 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -76,11 +71,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-update-table-2 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -114,11 +104,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-delete-table-2 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -152,11 +137,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-update-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks step s2-begin: @@ -196,11 +176,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-delete-table-3 s1-begin s1-view-locks s1-rollback s2-rollback s1-view-locks step s2-begin: @@ -240,11 +215,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-insert-table-1 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -278,11 +248,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-insert-table-2 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -316,11 +281,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-begin s2-insert-table-3 s1-view-locks s2-rollback s1-view-locks step s2-begin: @@ -354,11 +314,6 @@ mode|count --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-update-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -389,11 +344,6 @@ id|value 5| 5 (2 rows) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-update-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -426,11 +376,6 @@ id|value 7| 2 (4 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-update-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -462,11 +407,6 @@ id|value 5| 5 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -497,11 +437,6 @@ id|value 5| 5 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -533,11 +468,6 @@ id|value 5| 5 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-2 step s1-begin: @@ -568,11 +498,6 @@ id|value 5| 5 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-1 s1-delete-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -607,11 +532,6 @@ id|value 5| 5 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-1 s1-insert-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -643,11 +563,6 @@ id|value 5| 5 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -682,11 +597,6 @@ id|value 5| 5 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -723,11 +633,6 @@ id|value 5| 5 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-1 s1-commit s2-commit s1-select-table-3 step s1-begin: @@ -764,11 +669,6 @@ id|value 5| 5 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-insert-table-1 s1-update-table-2 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -800,11 +700,6 @@ id|value 5| 5 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s1-update-table-2 s2-insert-table-1 s1-commit s2-commit s1-select-table-3 step s1-begin: @@ -836,11 +731,6 @@ id|value 5| 5 (3 rows) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-insert-table-2 s1-update-table-3 s2-commit s1-commit s1-select-table-3 step s1-begin: @@ -877,11 +767,6 @@ id|value 5| 5 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s1-update-table-3 s2-insert-table-2 s1-commit s2-commit s1-select-table-3 step s1-begin: @@ -918,11 +803,6 @@ id|value 5| 5 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-1 s2-commit s1-commit step s1-begin: @@ -950,11 +830,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-2 s2-commit s1-commit step s1-begin: @@ -982,11 +857,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-insert-table-1 s1-select-table-3 s2-commit s1-commit step s1-begin: @@ -1014,11 +884,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-1 s2-commit s1-commit step s1-begin: @@ -1046,11 +911,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-2 s2-commit s1-commit step s1-begin: @@ -1078,11 +938,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-delete-table-2 s1-select-table-3 s2-commit s1-commit step s1-begin: @@ -1110,11 +965,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-1 s2-commit s1-commit step s1-begin: @@ -1142,11 +992,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-2 s2-commit s1-commit step s1-begin: @@ -1174,11 +1019,6 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s2-begin s2-update-table-3 s1-select-table-3 s2-commit s1-commit step s1-begin: @@ -1206,8 +1046,3 @@ step s2-commit: step s1-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out index 91482f247..21f2f11c4 100644 --- a/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out +++ 
b/src/test/regress/expected/isolation_ref2ref_foreign_keys_on_mx.out @@ -87,11 +87,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -180,11 +175,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -273,11 +263,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -366,11 +351,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-update-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -459,11 +439,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-delete-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -552,11 +527,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-1 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -645,11 +615,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-2 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -738,11 +703,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-table-3 s1-start-session-level-connection s1-view-locks s2-rollback-worker s1-view-locks s1-stop-connection s2-stop-connection step s2-start-session-level-connection: @@ -831,8 
+791,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out index 5e4c8d8d8..e5aa05580 100644 --- a/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_select_for_update_vs_all_on_mx.out @@ -87,11 +87,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table @@ -188,11 +183,6 @@ count 2 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table @@ -280,11 +270,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table @@ -373,11 +358,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table @@ -474,11 +454,6 @@ count 2 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-alter s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table @@ -562,11 +537,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_reference_table @@ -655,11 +625,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection create_reference_table @@ -710,8 +675,3 @@ 
stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out index 88f4d68cd..f42a41669 100644 --- a/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_ref_update_delete_upsert_vs_all_on_mx.out @@ -97,11 +97,6 @@ count 2 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-insert-select-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table @@ -198,11 +193,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-add-primary-key s1-start-session-level-connection s1-begin-on-worker s1-upsert s2-start-session-level-connection s2-begin-on-worker s2-drop s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table @@ -293,11 +283,6 @@ step s3-select-count: SELECT COUNT(*) FROM ref_table; ERROR: relation "ref_table" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_reference_table @@ -394,8 +379,3 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_reference_copy_vs_all.out b/src/test/regress/expected/isolation_reference_copy_vs_all.out index b7703be8e..1329ac93b 100644 --- a/src/test/regress/expected/isolation_reference_copy_vs_all.out +++ b/src/test/regress/expected/isolation_reference_copy_vs_all.out @@ -17,11 +17,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_reference_table @@ -45,11 +40,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_reference_table @@ -77,11 +67,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count create_reference_table @@ -111,11 +96,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_reference_table @@ -134,11 +114,6 @@ count 11 (1 row) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_reference_table @@ -158,11 +133,6 @@ count 20 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_reference_table @@ -182,11 +152,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_reference_table @@ -206,11 +171,6 @@ count 9 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_reference_table @@ -230,11 +190,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_reference_table @@ -250,11 +205,6 @@ step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; ERROR: relation "reference_copy" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_reference_table @@ -281,11 +231,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_reference_table @@ -313,11 +258,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-flaky-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_reference_table @@ -343,11 +283,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -374,11 +309,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -406,11 +336,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_reference_table @@ -437,11 +362,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_reference_table @@ -465,11 +385,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_reference_table @@ -489,11 +404,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_reference_table @@ -520,11 +430,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_reference_table @@ -548,11 +453,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_reference_table @@ -580,11 +480,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count create_reference_table @@ -614,11 +509,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_reference_table @@ -637,11 +527,6 @@ count 11 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_reference_table @@ -661,11 +546,6 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_reference_table @@ -685,11 +565,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_reference_table @@ -709,11 +584,6 @@ count 9 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_reference_table @@ -733,11 +603,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_reference_table @@ -754,11 +619,6 @@ step s2-copy: <... 
completed> ERROR: relation "reference_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM reference_copy; ERROR: relation "reference_copy" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table @@ -785,11 +645,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table @@ -817,11 +672,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -849,11 +699,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -881,11 +726,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table @@ -912,11 +752,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_reference_table @@ -940,11 +775,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_reference_table @@ -964,11 +794,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_reference_table @@ -995,8 +820,3 @@ count 15 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_reference_on_mx.out b/src/test/regress/expected/isolation_reference_on_mx.out index 4d7f1a51c..cbb130420 100644 --- a/src/test/regress/expected/isolation_reference_on_mx.out +++ b/src/test/regress/expected/isolation_reference_on_mx.out @@ -82,11 +82,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection 
step s1-start-session-level-connection: @@ -170,11 +165,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -258,11 +248,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -345,11 +330,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -433,11 +413,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -520,11 +495,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -607,11 +577,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -695,11 +660,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection step s2-start-session-level-connection: @@ -752,11 +712,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-select-from-ref-table s1-begin s1-alter-table s2-commit-worker 
s1-commit s2-stop-connection step s2-start-session-level-connection: @@ -809,8 +764,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_replace_wait_function.out b/src/test/regress/expected/isolation_replace_wait_function.out index b36aca587..950200d8b 100644 --- a/src/test/regress/expected/isolation_replace_wait_function.out +++ b/src/test/regress/expected/isolation_replace_wait_function.out @@ -22,8 +22,3 @@ ERROR: duplicate key value violates unique constraint "test_locking_a_key_14000 step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_replicated_dist_on_mx.out b/src/test/regress/expected/isolation_replicated_dist_on_mx.out index 646783bae..e53bf0ca9 100644 --- a/src/test/regress/expected/isolation_replicated_dist_on_mx.out +++ b/src/test/regress/expected/isolation_replicated_dist_on_mx.out @@ -2,7 +2,7 @@ Parsed test spec with 2 sessions starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -10,7 +10,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -18,7 +18,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -26,7 +26,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -34,7 +34,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -42,7 +42,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step 
s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -67,7 +67,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -75,22 +75,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -98,7 +93,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -106,7 +101,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -114,7 +109,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -122,7 +117,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -130,7 +125,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -155,7 +150,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node 
--------------------------------------------------------------------- @@ -163,22 +158,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -186,7 +176,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -194,7 +184,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -202,7 +192,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -210,7 +200,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -218,7 +208,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -243,7 +233,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -251,22 +241,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -274,7 +259,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -282,7 +267,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -290,7 +275,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -298,7 +283,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -306,7 +291,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -331,7 +316,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -339,22 +324,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-1-from-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection 
s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -362,7 +342,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -370,7 +350,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-delete-1-from-rep-table: - SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -378,7 +358,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -386,7 +366,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -394,7 +374,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -419,7 +399,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -427,22 +407,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-all-from-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -450,7 +425,7 @@ 
start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -458,7 +433,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-delete-all-from-rep-table: - SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table'); + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -466,7 +441,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -474,7 +449,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -482,7 +457,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -507,7 +482,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -515,22 +490,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-1-from-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -538,7 +508,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -546,7 +516,7 @@ 
run_commands_on_session_level_connection_to_node (1 row) step s1-delete-1-from-rep-table: - SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -554,7 +524,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -562,7 +532,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -570,7 +540,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -595,7 +565,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -603,22 +573,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-all-from-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -626,7 +591,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -634,7 +599,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-delete-all-from-rep-table: - SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table'); + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM replicated_table'); run_commands_on_session_level_connection_to_node 
--------------------------------------------------------------------- @@ -642,7 +607,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -650,7 +615,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -658,7 +623,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -683,7 +648,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -691,22 +656,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -714,7 +674,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -722,7 +682,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -730,7 +690,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); 
start_session_level_connection_to_node --------------------------------------------------------------------- @@ -738,7 +698,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -746,7 +706,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -771,7 +731,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -779,22 +739,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -802,7 +757,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -810,7 +765,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -818,7 +773,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -826,7 +781,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); 
run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -834,7 +789,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -859,7 +814,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -867,22 +822,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -890,7 +840,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -898,7 +848,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -906,7 +856,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -914,7 +864,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -922,7 +872,7 @@ 
run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -947,7 +897,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -955,22 +905,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -978,7 +923,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -986,7 +931,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -994,7 +939,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1002,7 +947,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1010,7 +955,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT 
run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1035,7 +980,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1043,22 +988,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1066,7 +1006,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1074,7 +1014,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1082,7 +1022,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1090,7 +1030,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1098,7 +1038,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1123,7 +1063,7 @@ 
run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1131,22 +1071,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1154,7 +1089,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1162,7 +1097,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1170,7 +1105,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1178,7 +1113,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1186,7 +1121,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1211,7 +1146,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT 
stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1219,22 +1154,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1242,7 +1172,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1250,7 +1180,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1258,7 +1188,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1266,7 +1196,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1274,7 +1204,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1299,7 +1229,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1307,22 +1237,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT 
stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1330,7 +1255,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1338,7 +1263,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1346,7 +1271,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1354,7 +1279,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1362,7 +1287,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1387,7 +1312,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1395,22 +1320,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1418,7 +1338,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1426,7 +1346,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1434,7 +1354,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1442,7 +1362,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1450,7 +1370,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1474,7 +1394,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1482,22 +1402,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-1-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-all-rep-table s1-commit-worker s2-commit-worker 
s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1505,7 +1420,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1513,7 +1428,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1521,7 +1436,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1529,7 +1444,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1537,7 +1452,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1561,7 +1476,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1569,22 +1484,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT 
start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1592,7 +1502,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1600,7 +1510,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1608,7 +1518,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1616,7 +1526,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1624,7 +1534,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1648,7 +1558,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1656,22 +1566,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-all-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 
57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1679,7 +1584,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1687,7 +1592,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1695,7 +1600,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1703,7 +1608,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1711,7 +1616,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1735,7 +1640,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1743,22 +1648,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-1-to-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT 
start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1766,7 +1666,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1774,7 +1674,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-1-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 1, 101 && echo 1, 111 && echo 1,1111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 1, 101 && echo 1, 111 && echo 1,1111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1782,7 +1682,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1790,7 +1690,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1798,7 +1698,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1823,7 +1723,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1831,22 +1731,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-1-to-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1854,7 
+1749,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1862,7 +1757,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-1-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 1, 101 && echo 1, 111 && echo 1,1111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 1, 101 && echo 1, 111 && echo 1,1111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1870,7 +1765,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1878,7 +1773,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1886,7 +1781,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1911,7 +1806,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1919,22 +1814,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-all-to-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1942,7 +1832,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node 
--------------------------------------------------------------------- @@ -1950,7 +1840,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-all-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1958,7 +1848,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1966,7 +1856,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1974,7 +1864,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12 WHERE user_id = 1'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -1999,7 +1889,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2007,22 +1897,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-all-to-rep-table s2-start-session-level-connection s2-begin-on-worker s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2030,7 +1915,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT 
run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2038,7 +1923,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-all-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2046,7 +1931,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2054,7 +1939,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2062,7 +1947,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -2087,7 +1972,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2095,22 +1980,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-all-to-rep-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node 
--------------------------------------------------------------------- @@ -2118,7 +1998,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2126,7 +2006,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-all-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2134,7 +2014,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2142,7 +2022,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2150,7 +2030,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2174,7 +2054,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2182,22 +2062,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-1-to-rep-table 
s2-start-session-level-connection s2-begin-on-worker s2-insert-into-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2205,7 +2080,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2213,7 +2088,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-1-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 1, 101 && echo 1, 111 && echo 1,1111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 1, 101 && echo 1, 111 && echo 1,1111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2221,7 +2096,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2229,7 +2104,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2237,7 +2112,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2261,7 +2136,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2269,22 +2144,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-all-to-rep-table s2-start-session-level-connection s2-begin-on-worker s2-copy-all-to-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT 
start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2292,7 +2162,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2300,7 +2170,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-all-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2308,7 +2178,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2316,7 +2186,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2324,7 +2194,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-copy-all-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2348,7 +2218,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2356,22 +2226,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: 
- SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-all-to-rep-table s2-start-session-level-connection s2-begin-on-worker s2-select-from-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2379,7 +2244,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2387,7 +2252,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-all-to-rep-table: - SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY replicated_table FROM PROGRAM ''echo 10, 101 && echo 11, 111 && echo 11, 111 && echo 12, 111 && echo 13, 111 && echo 14, 111 && echo 15, 111 && echo 16, 111 && echo 17, 111 && echo 18, 111 && echo 19, 111 && echo 20, 111 && echo 21, 111 && echo 22, 111 && echo 23, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2395,7 +2260,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2403,7 +2268,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2411,7 +2276,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-select-from-rep-table: - SELECT run_commands_on_session_level_connection_to_node('SELECT count(*) FROM replicated_table'); + SELECT run_commands_on_session_level_connection_to_node('SELECT count(*) FROM replicated_table'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2435,7 +2300,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node 
--------------------------------------------------------------------- @@ -2443,22 +2308,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-1-rep-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2466,7 +2326,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2474,7 +2334,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2482,10 +2342,10 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-alter-table: - ALTER TABLE replicated_table ADD COLUMN x INT; + ALTER TABLE replicated_table ADD COLUMN x INT; step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -2497,25 +2357,20 @@ run_commands_on_session_level_connection_to_node step s1-alter-table: <... 
completed> step s1-commit: - COMMIT; + COMMIT; step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-select-from-rep-table s1-begin s1-alter-table s2-commit-worker s1-commit s2-stop-connection step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2523,7 +2378,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2531,7 +2386,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-select-from-rep-table: - SELECT run_commands_on_session_level_connection_to_node('SELECT count(*) FROM replicated_table'); + SELECT run_commands_on_session_level_connection_to_node('SELECT count(*) FROM replicated_table'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2539,10 +2394,10 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-alter-table: - ALTER TABLE replicated_table ADD COLUMN x INT; + ALTER TABLE replicated_table ADD COLUMN x INT; step s2-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -2554,25 +2409,20 @@ run_commands_on_session_level_connection_to_node step s1-alter-table: <... 
completed> step s1-commit: - COMMIT; + COMMIT; step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-select s2-start-session-level-connection s2-begin-on-worker s2-insert-into-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2580,7 +2430,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2588,7 +2438,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-select: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table SELECT * FROM replicated_table_2'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table SELECT * FROM replicated_table_2'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2596,7 +2446,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2604,7 +2454,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2612,7 +2462,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(8,81),(9,91),(10,91),(11,91),(12,91), (13,91), (14,91), (15,91), (16,91), (17,91), (18,91), (19,91), (20,91)'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -2637,7 +2487,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2645,22 +2495,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); 
+ SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-select s2-start-session-level-connection s2-begin-on-worker s2-insert-into-1-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2668,7 +2513,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2676,7 +2521,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-select: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table SELECT * FROM replicated_table_2'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table SELECT * FROM replicated_table_2'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2684,7 +2529,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2692,7 +2537,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2700,7 +2545,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-1-rep-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table VALUES(1,81)'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -2725,7 +2570,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2733,22 +2578,17 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: 
s1-start-session-level-connection s1-begin-on-worker s1-insert-into-select-from-single-rep s2-start-session-level-connection s2-begin-on-worker s2-update-all-single-rep-table s2-update-all-rep-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2756,7 +2596,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2764,7 +2604,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-select-from-single-rep: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table SELECT * FROM single_replicated_table LIMIT 10'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO replicated_table SELECT * FROM single_replicated_table LIMIT 10'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2772,7 +2612,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -2780,7 +2620,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2788,7 +2628,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-single-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE single_replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE single_replicated_table SET value_1 = 12'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -2796,7 +2636,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-all-rep-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE replicated_table SET value_1 = 12'); step s1-commit-worker: SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -2821,7 +2661,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -2829,15 +2669,10 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node 
--------------------------------------------------------------------- (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_select_for_update.out b/src/test/regress/expected/isolation_select_for_update.out index a59f7fe03..9965497f2 100644 --- a/src/test/regress/expected/isolation_select_for_update.out +++ b/src/test/regress/expected/isolation_select_for_update.out @@ -29,11 +29,6 @@ step s2-update-t1: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-delete-t1 s1-commit s2-commit step s1-begin: @@ -64,11 +59,6 @@ step s2-delete-t1: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-update-t1 s1-commit s2-commit step s1-begin: @@ -99,11 +89,6 @@ step s2-update-t1: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-rt s1-commit s2-commit step s1-begin: @@ -135,11 +120,6 @@ step s2-update-rt: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-rt-with-lc-for-update s2-begin s2-update-t1 s1-commit s2-commit step s1-begin: @@ -170,11 +150,6 @@ step s1-commit: step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-t2-for-share s2-begin s2-select-from-t1-t2-for-share s1-commit s2-commit step s1-begin: @@ -213,11 +188,6 @@ step s1-commit: step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-rt-for-update s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit step s1-begin: @@ -257,11 +227,6 @@ id|val_1|id|val_1 step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-select-from-t1-t2-for-update s1-commit s2-commit step s1-begin: @@ -298,11 +263,6 @@ id|val_1|id|val_1 step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-within-cte s2-begin s2-update-t1 s1-commit s2-commit step s1-begin: @@ -330,11 +290,6 @@ step s2-update-t1: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-with-subquery s2-begin s2-update-t1 s1-commit s2-commit step s1-begin: @@ -365,11 +320,6 @@ step s2-update-t1: <... 
completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-rt-with-subquery s2-begin s2-update-rt s1-commit s2-commit step s1-begin: @@ -396,11 +346,6 @@ step s2-update-rt: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-select-from-t1-with-view s2-begin s2-update-t1 s1-commit s2-commit step s1-begin: @@ -427,11 +372,6 @@ step s2-update-t1: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-update-rt-with-cte-select-from-rt s2-begin s2-update-rt s1-commit s2-commit step s1-begin: @@ -454,8 +394,3 @@ step s2-update-rt: <... completed> step s2-commit: COMMIT; -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_select_vs_all.out b/src/test/regress/expected/isolation_select_vs_all.out index 06f8dc332..ba7c00d1e 100644 --- a/src/test/regress/expected/isolation_select_vs_all.out +++ b/src/test/regress/expected/isolation_select_vs_all.out @@ -27,11 +27,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -64,11 +59,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -104,11 +94,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -141,11 +126,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -182,11 +162,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -226,11 +201,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -266,11 +236,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -310,11 +275,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: 
s1-initialize s1-begin s1-adaptive-select s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -357,11 +317,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-insert s1-commit s1-select-count master_create_empty_shard @@ -385,11 +340,6 @@ count 6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-insert-select s1-commit s1-select-count master_create_empty_shard @@ -413,11 +363,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-update s1-commit s1-select-count master_create_empty_shard @@ -441,11 +386,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-delete s1-commit s1-select-count master_create_empty_shard @@ -469,11 +409,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-truncate s1-commit s1-select-count master_create_empty_shard @@ -498,11 +433,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-drop s1-commit s1-select-count master_create_empty_shard @@ -523,11 +453,6 @@ step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -558,11 +483,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -595,11 +515,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-disable-binary-protocol s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -634,11 +549,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -670,11 +580,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -707,11 +612,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -743,11 +643,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count master_create_empty_shard @@ -776,11 +671,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-router-select s2-master-modify-multiple-shards s1-commit s1-select-count master_create_empty_shard @@ -804,11 +694,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-master-drop-all-shards s1-commit s1-select-count master_create_empty_shard @@ -831,11 +716,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-router-select s2-distribute-table s1-commit s1-select-count master_create_empty_shard @@ -864,11 +744,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: 
s1-initialize s1-begin s1-insert s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -892,11 +767,6 @@ count 6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-insert-select s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -920,11 +790,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-update s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -948,11 +813,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-delete s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -976,11 +836,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-truncate s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -1004,11 +859,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-drop s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -1025,11 +875,6 @@ step s2-router-select: <... completed> ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-router-select s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -1060,11 +905,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -1097,11 +937,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -1133,11 +968,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -1170,11 +1000,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -1206,11 +1031,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -1239,11 +1059,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -1267,11 +1082,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s1-commit s1-select-count master_create_empty_shard @@ -1294,11 +1104,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-router-select s1-commit s1-select-count master_create_empty_shard @@ -1327,11 +1132,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert s1-commit s1-select-count master_create_empty_shard @@ -1359,11 +1159,6 @@ count 6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert-select s1-commit s1-select-count master_create_empty_shard @@ -1391,11 +1186,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-update s1-commit s1-select-count master_create_empty_shard @@ -1423,11 +1213,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-delete s1-commit s1-select-count master_create_empty_shard @@ -1455,11 +1240,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-truncate s1-commit s1-select-count master_create_empty_shard @@ -1488,11 +1268,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-drop s1-commit s1-select-count master_create_empty_shard @@ -1517,11 +1292,6 @@ step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -1556,11 +1326,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -1597,11 +1362,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -1636,11 +1396,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -1676,11 +1431,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -1717,11 +1467,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -1757,11 +1502,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count master_create_empty_shard @@ -1794,11 +1534,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-real-time-select s2-master-modify-multiple-shards s1-commit s1-select-count master_create_empty_shard @@ -1826,11 +1561,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-real-time-select s2-distribute-table s1-commit s1-select-count master_create_empty_shard @@ -1859,11 +1589,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-insert s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -1891,11 +1616,6 @@ count 6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - 
starting permutation: s1-initialize s1-begin s1-insert-select s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -1923,11 +1643,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-update s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -1955,11 +1670,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-delete s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -1987,11 +1697,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-truncate s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -2015,11 +1720,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-drop s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -2036,11 +1736,6 @@ step s2-real-time-select: <... completed> ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-real-time-select s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -2075,11 +1770,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -2116,11 +1806,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -2156,11 +1841,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -2197,11 +1877,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -2237,11 +1912,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -2274,11 +1944,6 @@ count 5 (1 row) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -2306,11 +1971,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-real-time-select s1-commit s1-select-count master_create_empty_shard @@ -2339,11 +1999,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-insert s1-commit s1-select-count master_create_empty_shard @@ -2374,11 +2029,6 @@ count 6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-insert-select s1-commit s1-select-count master_create_empty_shard @@ -2409,11 +2059,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-update s1-commit s1-select-count master_create_empty_shard @@ -2444,11 +2089,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-delete s1-commit s1-select-count master_create_empty_shard @@ -2479,11 +2119,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-truncate s1-commit s1-select-count master_create_empty_shard @@ -2515,11 +2150,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-drop s1-commit s1-select-count master_create_empty_shard @@ -2547,11 +2177,6 @@ step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -2589,11 +2214,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-adaptive-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -2633,11 +2253,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -2675,11 +2290,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -2718,11 +2328,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-adaptive-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -2762,11 +2367,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -2805,11 +2405,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-table-size s1-commit s1-select-count master_create_empty_shard @@ -2845,11 +2440,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-adaptive-select s2-master-modify-multiple-shards s1-commit s1-select-count master_create_empty_shard @@ -2880,11 +2470,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-adaptive-select s2-distribute-table s1-commit s1-select-count master_create_empty_shard @@ -2916,11 +2501,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-insert s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -2951,11 +2531,6 @@ count 6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting 
permutation: s1-initialize s1-begin s1-insert-select s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -2986,11 +2561,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-update s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -3021,11 +2591,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-delete s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -3056,11 +2621,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-truncate s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -3087,11 +2647,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-drop s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -3111,11 +2666,6 @@ step s2-adaptive-select: <... completed> ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-adaptive-select s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -3153,11 +2703,6 @@ run_command_on_workers (localhost,57638,t,1) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-adaptive-select s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -3197,11 +2742,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-adaptive-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -3240,11 +2780,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-adaptive-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -3284,11 +2819,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-adaptive-select s1-commit s1-select-count s1-show-columns master_create_empty_shard @@ -3327,11 +2857,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-table-size s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -3367,11 +2892,6 @@ count 5 (1 row) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -3402,11 +2922,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-adaptive-select s1-commit s1-select-count master_create_empty_shard @@ -3438,8 +2953,3 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_select_vs_all_on_mx.out index 90c296404..519401246 100644 --- a/src/test/regress/expected/isolation_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_select_vs_all_on_mx.out @@ -81,11 +81,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-insert-select s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -176,11 +171,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-delete s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -271,11 +261,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -366,11 +351,6 @@ count 7 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-begin s2-index s1-commit-worker s2-commit s1-stop-connection step s1-start-session-level-connection: @@ -422,11 +402,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: @@ -509,11 +484,6 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-disable-binary-protocol-on-worker s1-select s2-flaky-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection step s1-start-session-level-connection: @@ -568,8 
+538,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_setup.out b/src/test/regress/expected/isolation_setup.out new file mode 100644 index 000000000..c551354cb --- /dev/null +++ b/src/test/regress/expected/isolation_setup.out @@ -0,0 +1,19 @@ +Parsed test spec with 1 sessions + +starting permutation: setup +step setup: + -- Replace pg_isolation_test_session_is_blocked so that all isolation tests are run with Citus implementation. + -- + -- Vanilla PG only checks for local blocks, whereas citus implementation also checks worker jobs in distributed + -- transactions. + -- + -- We have some tests that do not produce deterministic outputs when we use the Citus UDFs. They restore this + -- function in the setup phase and replace it again on the teardown phase so that the remainder of the tests can + -- keep using the Citus alternatives. Those tests should never be run concurrently with other isolation tests. + SELECT citus_internal.replace_isolation_tester_func(); + +replace_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/isolation_shard_rebalancer_progress.out b/src/test/regress/expected/isolation_shard_rebalancer_progress.out index 992e95057..b03b7b99d 100644 --- a/src/test/regress/expected/isolation_shard_rebalancer_progress.out +++ b/src/test/regress/expected/isolation_shard_rebalancer_progress.out @@ -144,8 +144,3 @@ table_name|shardid|shard_size|sourcename|sourceport|source_shard_size|targetname --------------------------------------------------------------------- (0 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_truncate_vs_all.out b/src/test/regress/expected/isolation_truncate_vs_all.out index 7514be591..d323c3f54 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all.out +++ b/src/test/regress/expected/isolation_truncate_vs_all.out @@ -20,11 +20,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count master_create_empty_shard @@ -46,11 +41,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count master_create_empty_shard @@ -68,11 +58,6 @@ step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -101,11 +86,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -135,11 +115,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -166,11 +141,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns master_create_empty_shard @@ -199,11 +169,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns master_create_empty_shard @@ -233,11 +198,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns master_create_empty_shard @@ -266,11 +226,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count master_create_empty_shard @@ -297,11 +252,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count master_create_empty_shard @@ -323,11 +273,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count master_create_empty_shard @@ -354,11 +299,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-truncate s2-distribute-table s1-commit s2-commit s1-select-count master_create_empty_shard @@ -386,11 +326,6 @@ count 0 (1 row) 
-restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count master_create_empty_shard @@ -412,11 +347,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count master_create_empty_shard @@ -435,11 +365,6 @@ ERROR: relation "truncate_append" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -468,11 +393,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes master_create_empty_shard @@ -502,11 +422,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns master_create_empty_shard @@ -535,11 +450,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns master_create_empty_shard @@ -569,11 +479,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns master_create_empty_shard @@ -602,11 +507,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count master_create_empty_shard @@ -632,11 +532,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count master_create_empty_shard @@ -658,11 +553,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count master_create_empty_shard @@ -689,11 +579,6 @@ count 0 (1 row) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count master_create_empty_shard @@ -721,8 +606,3 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out index 5a4b244f0..e3baf9e89 100644 --- a/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_truncate_vs_all_on_mx.out @@ -90,11 +90,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -186,11 +181,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-select s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -282,11 +272,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -378,11 +363,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -474,11 +454,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-alter s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit s2-commit-worker s2-stop-connection s3-select-count step s1-begin: @@ -539,11 +514,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update s2-start-session-level-connection s2-begin-on-worker s2-truncate s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count step s1-start-session-level-connection: @@ -635,11 +605,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-truncate s3-select-count-from-ref-table s1-commit-worker s1-stop-connection step 
s1-start-session-level-connection: @@ -691,8 +656,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_undistribute_table.out b/src/test/regress/expected/isolation_undistribute_table.out index 4048f0f52..b972001f3 100644 --- a/src/test/regress/expected/isolation_undistribute_table.out +++ b/src/test/regress/expected/isolation_undistribute_table.out @@ -20,11 +20,6 @@ step s1-commit: step s2-undistribute: <... completed> ERROR: cannot complete operation because no such table exists -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-select s1-commit step s1-begin: @@ -52,11 +47,6 @@ a|b 5|6 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-insert s1-commit s2-select step s1-begin: @@ -89,11 +79,6 @@ a| b 9|10 (5 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-insert-select s1-commit s2-select step s1-begin: @@ -127,11 +112,6 @@ a|b 5|6 (6 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-delete s1-commit s2-select step s1-begin: @@ -161,11 +141,6 @@ a|b 5|6 (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-copy s1-commit s2-select step s1-begin: @@ -198,11 +173,6 @@ step s2-select: 13|14 (5 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-drop s1-commit s2-select step s1-begin: @@ -227,11 +197,6 @@ step s2-select: SELECT * FROM dist_table ORDER BY 1, 2; ERROR: relation "dist_table" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-truncate s1-commit s2-select step s1-begin: @@ -264,11 +229,6 @@ a|b 5|6 (3 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-select-for-update s1-commit step s1-begin: @@ -294,11 +254,6 @@ a|b 5|6 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-begin s1-undistribute s2-create-index-concurrently s1-commit step s1-begin: @@ -319,8 +274,3 @@ step s1-commit: COMMIT; step s2-create-index-concurrently: <... 
completed> -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out index ae3223ca7..c3badcec4 100644 --- a/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_update_delete_upsert_vs_all_on_mx.out @@ -94,11 +94,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete s2-start-session-level-connection s2-begin-on-worker s2-copy s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table @@ -194,11 +189,6 @@ count 3 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-alter-table s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection s3-select-count create_distributed_table @@ -290,11 +280,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update s2-start-session-level-connection s2-begin-on-worker s2-select-for-update s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection create_distributed_table @@ -382,8 +367,3 @@ stop_session_level_connection_to_node (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index ff2b76f00..86615648c 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -8,7 +8,7 @@ nodeid|nodename |nodeport (2 rows) step s1-begin: - BEGIN; + BEGIN; step s1-update-node-1: SELECT 1 FROM master_update_node( @@ -28,7 +28,7 @@ step s2-update-node-2: 58638); step s1-commit: - COMMIT; + COMMIT; step s2-update-node-2: <... completed> ?column? @@ -98,7 +98,7 @@ nodeid|nodename |nodeport (2 rows) step s1-begin: - BEGIN; + BEGIN; step s1-update-node-1: SELECT 1 FROM master_update_node( @@ -112,7 +112,7 @@ step s1-update-node-1: (1 row) step s2-begin: - BEGIN; + BEGIN; step s2-update-node-1: SELECT 1 FROM master_update_node( @@ -121,7 +121,7 @@ step s2-update-node-1: 58637); step s1-commit: - COMMIT; + COMMIT; step s2-update-node-1: <... completed> ?column? @@ -130,7 +130,7 @@ step s2-update-node-1: <... 
completed> (1 row) step s2-abort: - ABORT; + ABORT; step s1-show-nodes: SELECT nodeid, nodename, nodeport, isactive @@ -192,7 +192,7 @@ create_distributed_table (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-update-node-nonexistent: SELECT 1 FROM master_update_node( diff --git a/src/test/regress/expected/isolation_update_vs_all.out b/src/test/regress/expected/isolation_update_vs_all.out index f35f28317..54cf4ce02 100644 --- a/src/test/regress/expected/isolation_update_vs_all.out +++ b/src/test/regress/expected/isolation_update_vs_all.out @@ -20,11 +20,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-update-cte s1-commit s2-commit s1-select-count create_distributed_table @@ -56,11 +51,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-delete s1-commit s2-commit s1-select-count create_distributed_table @@ -82,11 +72,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-truncate s1-commit s2-commit s1-select-count create_distributed_table @@ -108,11 +93,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-drop s1-commit s2-commit s1-select-count create_distributed_table @@ -130,11 +110,6 @@ step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -163,11 +138,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-update s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -197,11 +167,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-update s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -228,11 +193,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -261,11 +221,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-update s2-ddl-drop-column s1-commit s2-commit s1-select-count 
s1-show-columns create_distributed_table @@ -295,11 +250,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -328,11 +278,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-table-size s1-commit s2-commit s1-select-count create_distributed_table @@ -358,11 +303,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table @@ -384,11 +324,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-update s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table @@ -417,11 +352,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-delete s1-commit s2-commit s1-select-count create_distributed_table @@ -448,11 +378,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-truncate s1-commit s2-commit s1-select-count create_distributed_table @@ -479,11 +404,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-drop s1-commit s2-commit s1-select-count create_distributed_table @@ -506,11 +426,6 @@ step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -544,11 +459,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-update-cte s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -583,11 +493,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-update-cte s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -619,11 +524,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -657,11 +557,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-update-cte s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -696,11 +591,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -734,11 +624,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-table-size s1-commit s2-commit s1-select-count create_distributed_table @@ -769,11 +654,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update-cte s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table @@ -800,11 +680,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-update s1-commit s2-commit s1-select-count create_distributed_table @@ -826,11 +701,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-update s1-commit s2-commit s1-select-count create_distributed_table @@ -852,11 +722,6 @@ count 0 (1 row) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-update s1-commit s2-commit s1-select-count create_distributed_table @@ -875,11 +740,6 @@ ERROR: relation "update_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -908,11 +768,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -942,11 +797,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -975,11 +825,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -1009,11 +854,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -1043,11 +883,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-update s1-commit s2-commit s1-select-count create_distributed_table @@ -1073,11 +908,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-update s1-commit s2-commit s1-select-count create_distributed_table @@ -1099,11 +929,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-update s1-commit s2-commit s1-select-count create_distributed_table @@ -1132,11 +957,6 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-update-cte s1-commit s2-commit s1-select-count create_distributed_table @@ -1162,11 +982,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- 
- -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-update-cte s1-commit s2-commit s1-select-count create_distributed_table @@ -1192,11 +1007,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-update-cte s1-commit s2-commit s1-select-count create_distributed_table @@ -1215,11 +1025,6 @@ ERROR: relation "update_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-update-cte s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -1253,11 +1058,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-update-cte s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -1292,11 +1092,6 @@ run_command_on_workers (localhost,57638,t,0) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-update-cte s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -1330,11 +1125,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-update-cte s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -1369,11 +1159,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-update-cte s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -1403,11 +1188,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-update-cte s1-commit s2-commit s1-select-count create_distributed_table @@ -1438,11 +1218,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-update-cte s1-commit s2-commit s1-select-count create_distributed_table @@ -1468,11 +1243,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-update-cte s1-commit s2-commit s1-select-count create_distributed_table @@ -1507,8 +1277,3 @@ count 10 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff 
--git a/src/test/regress/expected/isolation_upsert_vs_all.out b/src/test/regress/expected/isolation_upsert_vs_all.out index dfad2f3c1..017df63de 100644 --- a/src/test/regress/expected/isolation_upsert_vs_all.out +++ b/src/test/regress/expected/isolation_upsert_vs_all.out @@ -20,11 +20,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-update s1-commit s2-commit s1-select-count create_distributed_table @@ -46,11 +41,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-delete s1-commit s2-commit s1-select-count create_distributed_table @@ -72,11 +62,6 @@ count 4 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-truncate s1-commit s2-commit s1-select-count create_distributed_table @@ -98,11 +83,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-drop s1-commit s2-commit s1-select-count create_distributed_table @@ -120,11 +100,6 @@ step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -153,11 +128,6 @@ run_command_on_workers (localhost,57638,t,4) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-upsert s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -187,11 +157,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s1-upsert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -218,11 +183,6 @@ run_command_on_workers (localhost,57638,t,4) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -251,11 +211,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-upsert s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -285,11 +240,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin 
s1-upsert s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -318,11 +268,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-table-size s1-commit s2-commit s1-select-count create_distributed_table @@ -348,11 +293,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table @@ -374,11 +314,6 @@ count 0 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-upsert s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table @@ -407,11 +342,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-update s2-upsert s1-commit s2-commit s1-select-count create_distributed_table @@ -433,11 +363,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-upsert s1-commit s2-commit s1-select-count create_distributed_table @@ -459,11 +384,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-upsert s1-commit s2-commit s1-select-count create_distributed_table @@ -485,11 +405,6 @@ count 1 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-upsert s1-commit s2-commit s1-select-count create_distributed_table @@ -508,11 +423,6 @@ ERROR: relation "upsert_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -541,11 +451,6 @@ run_command_on_workers (localhost,57638,t,4) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table @@ -575,11 +480,6 @@ run_command_on_workers (localhost,57638,t,2) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -608,11 +508,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func 
---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -642,11 +537,6 @@ run_command_on_workers (localhost,57638,t,"") (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table @@ -676,11 +566,6 @@ run_command_on_workers (localhost,57638,t,new_column) (2 rows) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-upsert s1-commit s2-commit s1-select-count create_distributed_table @@ -706,11 +591,6 @@ count 5 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-upsert s1-commit s2-commit s1-select-count create_distributed_table @@ -732,8 +612,3 @@ count 1 (1 row) -restore_isolation_tester_func ---------------------------------------------------------------------- - -(1 row) - diff --git a/src/test/regress/expected/isolation_vacuum_skip_locked.out b/src/test/regress/expected/isolation_vacuum_skip_locked.out new file mode 100644 index 000000000..5e27966ee --- /dev/null +++ b/src/test/regress/expected/isolation_vacuum_skip_locked.out @@ -0,0 +1,171 @@ +Parsed test spec with 2 sessions + +starting permutation: lock_share vac_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_specified: VACUUM (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share vac_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_all_parts: VACUUM (SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_share analyze_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping analyze of "part1" --- lock not available +step analyze_specified: ANALYZE (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share analyze_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step analyze_all_parts: ANALYZE (SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_share vac_analyze_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_analyze_specified: VACUUM (ANALYZE, SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share vac_analyze_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step vac_analyze_all_parts: VACUUM (ANALYZE, SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_share vac_full_specified commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_full_specified: VACUUM (SKIP_LOCKED, FULL) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_share vac_full_all_parts commit +step lock_share: + BEGIN; + LOCK part1 IN SHARE MODE; + +step 
vac_full_all_parts: VACUUM (SKIP_LOCKED, FULL) parted; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_specified: VACUUM (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +step vac_all_parts: VACUUM (SKIP_LOCKED) parted; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive analyze_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping analyze of "part1" --- lock not available +step analyze_specified: ANALYZE (SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive analyze_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +step analyze_all_parts: ANALYZE (SKIP_LOCKED) parted; +step commit: + COMMIT; + +step analyze_all_parts: <... completed> + +starting permutation: lock_access_exclusive vac_analyze_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_analyze_specified: VACUUM (ANALYZE, SKIP_LOCKED) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_analyze_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +step vac_analyze_all_parts: VACUUM (ANALYZE, SKIP_LOCKED) parted; +step commit: + COMMIT; + +step vac_analyze_all_parts: <... completed> + +starting permutation: lock_access_exclusive vac_full_specified commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +s2: WARNING: skipping vacuum of "part1" --- lock not available +step vac_full_specified: VACUUM (SKIP_LOCKED, FULL) part1, part2; +step commit: + COMMIT; + + +starting permutation: lock_access_exclusive vac_full_all_parts commit +step lock_access_exclusive: + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; + +step vac_full_all_parts: VACUUM (SKIP_LOCKED, FULL) parted; +step commit: + COMMIT; + diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index cfc93165a..a7b75811d 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -426,20 +426,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE ALTER EXTENSION citus UPDATE TO '9.4-2'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -467,20 +467,20 @@ SELECT * FROM 
multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '9.4-1'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -563,20 +563,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE ALTER EXTENSION citus UPDATE TO '9.5-2'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -604,20 +604,20 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '9.5-1'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -1031,6 +1031,15 @@ SELECT * FROM multi_extension.print_extension_changes(); | view citus_stat_activity (41 rows) +-- Test downgrade to 11.0-1 from 11.0-2 +ALTER EXTENSION citus UPDATE TO '11.0-2'; +ALTER EXTENSION citus UPDATE TO '11.0-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + -- Snapshot of state at 11.0-2 ALTER EXTENSION citus UPDATE TO '11.0-2'; SELECT * FROM multi_extension.print_extension_changes(); @@ -1042,9 +1051,26 @@ SELECT * FROM multi_extension.print_extension_changes(); | function start_metadata_sync_to_all_nodes() boolean (4 rows) --- Test downgrade script (result should be empty) -ALTER EXTENSION citus UPDATE TO '11.0-1'; +-- Test downgrade to 11.0-2 from 11.0-3 +ALTER EXTENSION 
citus UPDATE TO '11.0-3'; ALTER EXTENSION citus UPDATE TO '11.0-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Snapshot of state at 11.0-3 +ALTER EXTENSION citus UPDATE TO '11.0-3'; +SELECT * FROM multi_extension.print_extension_changes(); + previous_object | current_object +--------------------------------------------------------------------- +(0 rows) + +-- Test downgrade to 11.0-3 from 11.1-1 +ALTER EXTENSION citus UPDATE TO '11.1-1'; +ALTER EXTENSION citus UPDATE TO '11.0-3'; +-- Should be empty result since upgrade+downgrade should be a no-op SELECT * FROM multi_extension.print_extension_changes(); previous_object | current_object --------------------------------------------------------------------- @@ -1069,28 +1095,24 @@ SELECT * FROM multi_extension.print_extension_changes(); table columnar.chunk_group | table columnar.options | table columnar.stripe | + | function citus_split_shard_by_split_points(bigint,text[],integer[],citus.split_mode) void | function columnar.get_storage_id(regclass) bigint | function columnar_internal.columnar_handler(internal) table_am_handler + | function worker_split_copy(bigint,citus.split_copy_info[]) void | schema columnar_internal | sequence columnar_internal.storageid_seq | table columnar_internal.chunk | table columnar_internal.chunk_group | table columnar_internal.options | table columnar_internal.stripe + | type citus.split_copy_info + | type citus.split_mode | view columnar.chunk | view columnar.chunk_group | view columnar.options | view columnar.storage | view columnar.stripe -(27 rows) - --- Test downgrade script (result should be empty) -ALTER EXTENSION citus UPDATE TO '11.0-2'; -ALTER EXTENSION citus UPDATE TO '11.1-1'; -SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) +(31 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/multi_size_queries.out b/src/test/regress/expected/multi_size_queries.out index 4f1df941c..97036b1db 100644 --- a/src/test/regress/expected/multi_size_queries.out +++ b/src/test/regress/expected/multi_size_queries.out @@ -75,7 +75,7 @@ SELECT citus_table_size('customer_copy_hash'), citus_table_size('supplier'); citus_table_size | citus_table_size | citus_table_size --------------------------------------------------------------------- - 548864 | 548864 | 425984 + 548864 | 548864 | 442368 (1 row) CREATE INDEX index_1 on customer_copy_hash(c_custkey); diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index 725c307da..5849eb52f 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -246,10 +246,6 @@ WHERE tablename = 'dustbunnies_990002' ORDER BY attname; \c - - :master_host :master_port SET citus.log_remote_commands TO ON; --- verify warning for unqualified VACUUM -VACUUM; -WARNING: not propagating VACUUM command to worker nodes -HINT: Provide a specific table in order to VACUUM distributed tables. 
-- check for multiple table vacuum VACUUM dustbunnies, second_dustbunnies; NOTICE: issuing VACUUM public.dustbunnies_990002 @@ -260,14 +256,10 @@ NOTICE: issuing VACUUM public.second_dustbunnies_990003 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing VACUUM public.second_dustbunnies_990003 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx --- and warning when using targeted VACUUM without DDL propagation +-- and do not propagate when using targeted VACUUM without DDL propagation SET citus.enable_ddl_propagation to false; VACUUM dustbunnies; -WARNING: not propagating VACUUM command to worker nodes -HINT: Set citus.enable_ddl_propagation to true in order to send targeted VACUUM commands to worker nodes. ANALYZE dustbunnies; -WARNING: not propagating ANALYZE command to worker nodes -HINT: Set citus.enable_ddl_propagation to true in order to send targeted ANALYZE commands to worker nodes. SET citus.enable_ddl_propagation to DEFAULT; -- test worker_hash SELECT worker_hash(123); @@ -314,3 +306,269 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx 1 (1 row) +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 970000; +SET citus.log_remote_commands TO OFF; +CREATE TABLE local_vacuum_table(id int primary key, b text); +CREATE TABLE reference_vacuum_table(id int); +SELECT create_reference_table('reference_vacuum_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE distributed_vacuum_table(id int); +SELECT create_distributed_table('distributed_vacuum_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SET citus.log_remote_commands TO ON; +-- should propagate to all workers because no table is specified +VACUUM; +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'on' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'on' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- should not propagate because no distributed table is specified +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +VACUUM local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + pg_size_pretty +--------------------------------------------------------------------- + 21 MB +(1 row) + +-- vacuum full deallocates pages of dead tuples whereas normal vacuum only marks dead tuples on visibility map +VACUUM FULL local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + pg_size_pretty +--------------------------------------------------------------------- + 16 kB +(1 row) + +-- should propagate to all workers because table is reference table +VACUUM reference_vacuum_table; +NOTICE: issuing VACUUM public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM 
public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- should propagate to all workers because table is distributed table +VACUUM distributed_vacuum_table; +NOTICE: issuing VACUUM public.distributed_vacuum_table_970001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- only distributed_vacuum_table and reference_vacuum_table should propagate +VACUUM distributed_vacuum_table, local_vacuum_table, reference_vacuum_table; +NOTICE: issuing VACUUM public.distributed_vacuum_table_970001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- only reference_vacuum_table should propagate +VACUUM local_vacuum_table, reference_vacuum_table; +NOTICE: issuing VACUUM public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- vacuum (disable_page_skipping) aggressively process pages of the relation, it does not respect visibility map +VACUUM (DISABLE_PAGE_SKIPPING true) local_vacuum_table; +VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; +-- vacuum (index_cleanup on, parallel 1) should execute index vacuuming and index cleanup phases in parallel +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + pg_size_pretty +--------------------------------------------------------------------- + 56 MB +(1 row) + +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + pg_size_pretty +--------------------------------------------------------------------- + 21 MB +(1 row) + +-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true) +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset +SELECT :size1 > :size2 as truncate_less_size; + truncate_less_size +--------------------------------------------------------------------- + t +(1 row) + +-- vacuum (analyze) should be analyzing the table to generate statistics after vacuuming +select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table'; + analyze_count +--------------------------------------------------------------------- + 0 + 0 +(2 rows) + +vacuum (analyze) local_vacuum_table, reference_vacuum_table; +NOTICE: issuing VACUUM (ANALYZE) public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (ANALYZE) 
public.reference_vacuum_table_970000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- give enough time for stats to be updated.(updated per 500ms by default) +select pg_sleep(1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table'; + analyze_count +--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +-- should not propagate because ddl propagation is disabled +SET citus.enable_ddl_propagation TO OFF; +VACUUM distributed_vacuum_table; +SET citus.enable_ddl_propagation TO ON; +SET citus.log_remote_commands TO OFF; +-- ANALYZE tests +CREATE TABLE local_analyze_table(id int); +CREATE TABLE reference_analyze_table(id int); +SELECT create_reference_table('reference_analyze_table'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE distributed_analyze_table(id int); +SELECT create_distributed_table('distributed_analyze_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE loc (a INT, b INT); +CREATE TABLE dist (a INT); +SELECT create_distributed_table ('dist', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SET citus.log_remote_commands TO ON; +-- should propagate to all workers because no table is specified +ANALYZE; +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'on' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SET citus.enable_ddl_propagation TO 'on' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- should not propagate because no distributed table is specified +ANALYZE local_analyze_table; +-- should propagate to all workers because table is reference table +ANALYZE reference_analyze_table; +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN 
TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE public.reference_analyze_table_970002 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE public.reference_analyze_table_970002 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- should propagate to all workers because table is distributed table +ANALYZE distributed_analyze_table; +NOTICE: issuing ANALYZE public.distributed_analyze_table_970003 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- only distributed_analyze_table and reference_analyze_table should propagate +ANALYZE distributed_analyze_table, local_analyze_table, reference_analyze_table; +NOTICE: issuing ANALYZE public.distributed_analyze_table_970003 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE public.reference_analyze_table_970002 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE public.reference_analyze_table_970002 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- only reference_analyze_table should propagate +ANALYZE local_analyze_table, reference_analyze_table; +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE public.reference_analyze_table_970002 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ANALYZE public.reference_analyze_table_970002 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing PREPARE TRANSACTION 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx 
connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing COMMIT PREPARED 'citus_xx_xx_xx_xx' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- should not propagate because ddl propagation is disabled +SET citus.enable_ddl_propagation TO OFF; +ANALYZE distributed_analyze_table; +SET citus.enable_ddl_propagation TO ON; +-- analyze only specified columns for corresponding tables +ANALYZE loc(b), dist(a); +NOTICE: issuing ANALYZE public.dist_970004 (a) +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index c5c60fd70..fa6e42065 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -10,7 +10,7 @@ set search_path to pg14; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 980000; SET citus.shard_count TO 2; --- test the new vacuum option, process_toast +-- test the new vacuum option, process_toast and also auto option for index_cleanup CREATE TABLE t1 (a int); SELECT create_distributed_table('t1','a'); create_distributed_table @@ -41,6 +41,69 @@ NOTICE: issuing VACUUM pg14.t1_980000 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing VACUUM pg14.t1_980001 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +VACUUM (INDEX_CLEANUP AUTO) t1; +NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +VACUUM (INDEX_CLEANUP) t1; +NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +VACUUM (INDEX_CLEANUP AuTo) t1; +NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (INDEX_CLEANUP auto) pg14.t1_980001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +VACUUM (INDEX_CLEANUP false) t1; +NOTICE: issuing VACUUM (INDEX_CLEANUP false) pg14.t1_980000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (INDEX_CLEANUP false) pg14.t1_980001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +VACUUM (INDEX_CLEANUP true) t1; +NOTICE: issuing VACUUM (INDEX_CLEANUP true) pg14.t1_980000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (INDEX_CLEANUP true) pg14.t1_980001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +VACUUM (INDEX_CLEANUP "AUTOX") t1; +ERROR: index_cleanup requires a Boolean value +VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1; +NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (ANALYZE,FREEZE,FULL,SKIP_LOCKED,PROCESS_TOAST,TRUNCATE,INDEX_CLEANUP auto) pg14.t1_980001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1; 
+NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing VACUUM (FULL,PROCESS_TOAST,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- vacuum (process_toast true) should be vacuuming toast tables (default is true) +CREATE TABLE local_vacuum_table(name text); +select reltoastrelid from pg_class where relname='local_vacuum_table' +\gset +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; +SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + frozen_performed +--------------------------------------------------------------------- + t +(1 row) + +-- vacuum (process_toast false) should not be vacuuming toast tables (default is true) +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; +SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + frozen_not_performed +--------------------------------------------------------------------- + t +(1 row) + +DROP TABLE local_vacuum_table; SET citus.log_remote_commands TO OFF; create table dist(a int, b int); select create_distributed_table('dist','a'); @@ -1376,6 +1439,35 @@ SELECT create_distributed_table('ctlt1', 'a'); (1 row) CREATE TABLE ctlt_all_2 (LIKE ctlt1 INCLUDING ALL); +CREATE TABLE compression_and_defaults ( + data text COMPRESSION lz4 DEFAULT '"{}"'::text COLLATE "C" NOT NULL PRIMARY KEY, + rev text +) +WITH ( + autovacuum_vacuum_scale_factor='0.01', + fillfactor='75' +); +SELECT create_distributed_table('compression_and_defaults', 'data', colocate_with:='none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE compression_and_generated_col ( + data text COMPRESSION lz4 GENERATED ALWAYS AS (rev || '{]') STORED COLLATE "C" NOT NULL, + rev text +) +WITH ( + autovacuum_vacuum_scale_factor='0.01', + fillfactor='75' +); +SELECT create_distributed_table('compression_and_generated_col', 'rev', colocate_with:='none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE compression_and_defaults, compression_and_generated_col; -- cleanup set client_min_messages to error; drop extension postgres_fdw cascade; diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index ba9e74dda..e14b98282 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -113,7 +113,7 @@ ORDER BY 1; function citus_shard_indexes_on_worker() function citus_shard_sizes() function citus_shards_on_worker() - function citus_split_shard_by_split_points(bigint,integer[],integer[]) + function citus_split_shard_by_split_points(bigint,text[],integer[],citus.split_mode) function citus_stat_activity() function citus_stat_statements() function citus_stat_statements_reset() @@ -243,6 +243,7 @@ ORDER BY 1; function worker_record_sequence_dependency(regclass,regclass,name) function worker_save_query_explain_analyze(text,jsonb) function worker_split_shard_replication_setup(citus.split_shard_info[]) + 
function worker_split_copy(bigint,citus.split_copy_info[]) schema citus schema citus_internal schema columnar @@ -272,6 +273,8 @@ ORDER BY 1; type citus.distribution_type type citus.shard_transfer_mode type citus.split_shard_info + type citus.split_copy_info + type citus.split_mode type citus_copy_format type noderole view citus_dist_stat_activity @@ -289,5 +292,5 @@ ORDER BY 1; view columnar.stripe view pg_dist_shard_placement view time_partitions -(270 rows) +(274 rows) diff --git a/src/test/regress/expected/worker_split_copy_test.out b/src/test/regress/expected/worker_split_copy_test.out new file mode 100644 index 000000000..52f93d5c1 --- /dev/null +++ b/src/test/regress/expected/worker_split_copy_test.out @@ -0,0 +1,96 @@ +CREATE SCHEMA worker_split_copy_test; +SET search_path TO worker_split_copy_test; +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81070000; +-- BEGIN: Create distributed table and insert data. +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table"(id int primary key, value char); +SELECT create_distributed_table('"test !/ \n _""dist_123_table"', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO "test !/ \n _""dist_123_table" (id, value) (SELECT g.id, 'N' FROM generate_series(1, 1000) AS g(id)); +-- END: Create distributed table and insert data. +-- BEGIN: Switch to Worker1, Create target shards in worker for local 2-way split copy. +\c - - - :worker_1_port +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070015"(id int primary key, value char); +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070016"(id int primary key, value char); +-- End: Switch to Worker1, Create target shards in worker for local 2-way split copy. +-- BEGIN: List row count for source shard and targets shard in Worker1. +\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070000"; + count +--------------------------------------------------------------------- + 510 +(1 row) + +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015"; + count +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016"; + count +--------------------------------------------------------------------- + 0 +(1 row) + +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070001"; + count +--------------------------------------------------------------------- + 490 +(1 row) + +-- END: List row count for source shard and targets shard in Worker1. +-- BEGIN: Set worker_1_node and worker_2_node +\c - - - :worker_1_port +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END: Set worker_1_node and worker_2_node +-- BEGIN: Trigger 2-way local shard split copy. +-- Ensure we will perform text copy. 
+SET citus.enable_binary_protocol = false; +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split children 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + -1073741824, --split range end + :worker_1_node)::citus.split_copy_info, + -- split copy info for split children 2 + ROW(81070016, --destination shard id + -1073741823, --split range begin + -1, --split range end + :worker_1_node)::citus.split_copy_info + ] + ); + worker_split_copy +--------------------------------------------------------------------- + +(1 row) + +-- END: Trigger 2-way local shard split copy. +-- BEGIN: List updated row count for local targets shard. +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015"; + count +--------------------------------------------------------------------- + 247 +(1 row) + +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016"; + count +--------------------------------------------------------------------- + 263 +(1 row) + +-- END: List updated row count for local targets shard. +-- BEGIN: CLEANUP. +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA worker_split_copy_test CASCADE; +-- END: CLEANUP. diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index ac60c7c49..c0fbfcfa3 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -1,3 +1,4 @@ +test: isolation_setup test: isolation_add_remove_node test: isolation_update_node test: isolation_update_node_lock_writes @@ -27,7 +28,7 @@ test: isolation_dml_vs_repair isolation_copy_placement_vs_copy_placement test: isolation_concurrent_dml isolation_data_migration test: isolation_drop_shards isolation_copy_placement_vs_modification -test: isolation_insert_vs_vacuum isolation_transaction_recovery +test: isolation_insert_vs_vacuum isolation_transaction_recovery isolation_vacuum_skip_locked test: isolation_progress_monitoring test: isolation_dump_local_wait_edges diff --git a/src/test/regress/mitmscripts/README.md b/src/test/regress/mitmscripts/README.md index 03967b4ac..884f10b06 100644 --- a/src/test/regress/mitmscripts/README.md +++ b/src/test/regress/mitmscripts/README.md @@ -17,9 +17,18 @@ Automated Failure Testing works by inserting a network proxy (mitmproxy) between ## Getting Started -First off, to use this you'll need mitmproxy, I recommend version `7.0.4`, and I also recommend running it with `python 3.9`. This script integrates pretty deeply with mitmproxy so other versions might fail to work. +First off, to use this you'll need mitmproxy. +Currently, we rely on a [fork](https://github.com/thanodnl/mitmproxy/tree/fix/tcp-flow-kill) to run the failure tests. +We recommend using pipenv to set up your failure testing environment since that will handle installing the fork +and other dependencies which may be updated/changed. -I highly recommend using pipenv to install mitmproxy. It lets you easily manage isolated environments (instead of installing python packages globally). If you've heard of virtualenv, pipenv is that but much easier to use. +Setting up pipenv is easy if you already have python and pip set up: +```bash +pip install pipenv +``` + +If the Pipfile requires a version you do not have, simply install that python version and retry. +Pipenv should be able to find the newly installed python and set up the environment.
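+For example, a typical setup could look like the sketch below. The directory and the `make check-failure` target are assumptions based on a standard checkout; adjust them if your Pipfile or failure schedule lives elsewhere.
+```bash
+# Install pipenv itself (once per machine).
+pip install pipenv
+
+# Create the environment from the Pipfile and install the pinned
+# mitmproxy fork plus the other test dependencies.
+cd src/test/regress
+pipenv install
+
+# Work from inside the environment so the tests pick up the right mitmproxy.
+pipenv shell
+make check-failure
+```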
Once you've installed it: diff --git a/src/test/regress/spec/isolation_acquire_distributed_locks.spec b/src/test/regress/spec/isolation_acquire_distributed_locks.spec index eb6d51e68..ba2d0af7f 100644 --- a/src/test/regress/spec/isolation_acquire_distributed_locks.spec +++ b/src/test/regress/spec/isolation_acquire_distributed_locks.spec @@ -51,8 +51,6 @@ teardown DROP TABLE partitioned_table; SELECT citus_remove_node('localhost', 57636); - - SELECT citus_internal.restore_isolation_tester_func(); } // coordinator session diff --git a/src/test/regress/spec/isolation_blocking_move_multi_shard_commands.spec b/src/test/regress/spec/isolation_blocking_move_multi_shard_commands.spec index 45f5fed84..610920cc4 100644 --- a/src/test/regress/spec/isolation_blocking_move_multi_shard_commands.spec +++ b/src/test/regress/spec/isolation_blocking_move_multi_shard_commands.spec @@ -3,9 +3,6 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 1; @@ -18,8 +15,6 @@ setup teardown { - SELECT citus_internal.restore_isolation_tester_func(); - DROP TABLE selected_shard; DROP TABLE logical_replicate_placement; } diff --git a/src/test/regress/spec/isolation_blocking_move_multi_shard_commands_on_mx.spec b/src/test/regress/spec/isolation_blocking_move_multi_shard_commands_on_mx.spec index df73e0bfc..2592d6b1b 100644 --- a/src/test/regress/spec/isolation_blocking_move_multi_shard_commands_on_mx.spec +++ b/src/test/regress/spec/isolation_blocking_move_multi_shard_commands_on_mx.spec @@ -20,9 +20,6 @@ setup AS 'citus', $$stop_session_level_connection_to_node$$; RESET citus.enable_metadata_sync; - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - -- start_metadata_sync_to_node can not be run inside a transaction block -- following is a workaround to overcome that -- port numbers are hard coded at the moment @@ -48,8 +45,6 @@ teardown { DROP TABLE selected_shard; DROP TABLE logical_replicate_placement; - - SELECT citus_internal.restore_isolation_tester_func(); } diff --git a/src/test/regress/spec/isolation_blocking_move_single_shard_commands.spec b/src/test/regress/spec/isolation_blocking_move_single_shard_commands.spec index f125904c0..c834a2916 100644 --- a/src/test/regress/spec/isolation_blocking_move_single_shard_commands.spec +++ b/src/test/regress/spec/isolation_blocking_move_single_shard_commands.spec @@ -2,9 +2,6 @@ // so setting the corresponding shard here is useful setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 1; @@ -16,8 +13,6 @@ setup teardown { - SELECT citus_internal.restore_isolation_tester_func(); - DROP TABLE selected_shard; DROP TABLE logical_replicate_placement; } diff --git a/src/test/regress/spec/isolation_blocking_move_single_shard_commands_on_mx.spec b/src/test/regress/spec/isolation_blocking_move_single_shard_commands_on_mx.spec index 74024a7c8..5b102917d 100644 --- a/src/test/regress/spec/isolation_blocking_move_single_shard_commands_on_mx.spec +++ b/src/test/regress/spec/isolation_blocking_move_single_shard_commands_on_mx.spec @@ -19,9 +19,6 @@ setup AS 'citus', $$stop_session_level_connection_to_node$$; RESET citus.enable_ddl_propagation; - SELECT citus_internal.replace_isolation_tester_func(); - SELECT 
citus_internal.refresh_isolation_tester_prepared_statement(); - -- start_metadata_sync_to_node can not be run inside a transaction block -- following is a workaround to overcome that -- port numbers are hard coded at the moment @@ -46,8 +43,6 @@ teardown { DROP TABLE selected_shard; DROP TABLE logical_replicate_placement; - - SELECT citus_internal.restore_isolation_tester_func(); } diff --git a/src/test/regress/spec/isolation_blocking_shard_split.spec b/src/test/regress/spec/isolation_blocking_shard_split.spec new file mode 100644 index 000000000..a06824886 --- /dev/null +++ b/src/test/regress/spec/isolation_blocking_shard_split.spec @@ -0,0 +1,146 @@ +setup +{ + SET citus.shard_count to 2; + SET citus.shard_replication_factor to 1; + SELECT setval('pg_dist_shardid_seq', 1500000); + + CREATE TABLE to_split_table (id int, value int); + SELECT create_distributed_table('to_split_table', 'id'); +} + +teardown +{ + DROP TABLE to_split_table; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; + + -- the tests are written with the logic where single shard SELECTs + -- do not to open transaction blocks + SET citus.select_opens_transaction_block TO false; +} + +// cache all placements +step "s1-load-cache" +{ + -- Indirect way to load cache. + TRUNCATE to_split_table; +} + +step "s1-insert" +{ + -- Id '123456789' maps to shard 1500002. + SELECT get_shard_id_for_distribution_column('to_split_table', 123456789); + + INSERT INTO to_split_table VALUES (123456789, 1); +} + +step "s1-update" +{ + UPDATE to_split_table SET value = 111 WHERE id = 123456789; +} + +step "s1-delete" +{ + DELETE FROM to_split_table WHERE id = 123456789; +} + +step "s1-select" +{ + SELECT count(*) FROM to_split_table WHERE id = 123456789; +} + +step "s1-ddl" +{ + CREATE INDEX test_table_index ON to_split_table(id); +} + +step "s1-copy" +{ + COPY to_split_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s1-blocking-shard-split" +{ + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500001, + ARRAY['-1073741824'], + ARRAY[1, 2], + 'blocking'); +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-blocking-shard-split" +{ + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['1073741824'], + ARRAY[1, 2], + 'blocking'); +} + +step "s2-commit" +{ + COMMIT; +} + +step "s2-print-cluster" +{ + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM to_split_table ORDER BY id, value; +} + +step "s2-print-index-count" +{ + SELECT + nodeport, success, result + FROM + run_command_on_placements('to_split_table', 'select count(*) from pg_indexes WHERE tablename = ''%s''') + ORDER BY + nodeport; +} + +// Run shard split while concurrently performing DML and index creation +// We expect DML,Copy to fail because the shard they are waiting for is destroyed. 
+ permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" + // The same tests without loading the cache at first + permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-insert" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" + permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" + +// Concurrent shard split blocks on different shards of the same table (or any colocated table) + permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" + // The same test above without loading the cache at first + permutation "s1-insert" "s1-begin" "s1-blocking-shard-split" "s2-blocking-shard-split" "s1-commit" "s2-print-cluster" + +// Concurrent DDL blocks on different shards of the same table (or any colocated table) + permutation "s1-load-cache" "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" + // The same tests without loading the cache at first + permutation "s1-begin" "s1-select" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" "s2-print-index-count" diff --git a/src/test/regress/spec/isolation_blocking_shard_split_with_fkey_to_reference.spec b/src/test/regress/spec/isolation_blocking_shard_split_with_fkey_to_reference.spec new file mode 100644 index 000000000..243d8ef05 --- /dev/null +++ b/src/test/regress/spec/isolation_blocking_shard_split_with_fkey_to_reference.spec @@ -0,0 +1,104 @@ +setup +{ + SELECT setval('pg_dist_shardid_seq', 1500000); + SET citus.shard_count to 2; + SET citus.shard_replication_factor to 1; + + CREATE TABLE reference_table (id int PRIMARY KEY, value int); + SELECT create_reference_table('reference_table'); + + CREATE TABLE table_to_split (id int, value int); + SELECT create_distributed_table('table_to_split', 'id'); +} + +teardown +{ + DROP TABLE table_to_split CASCADE; + DROP TABLE reference_table CASCADE; +} + +session "s1" + +step "s1-begin" +{ + BEGIN; +} + +step "s1-insert" +{ + INSERT INTO reference_table VALUES (5, 10); +} + +step "s1-update" +{ + UPDATE reference_table SET value = 5 WHERE id = 5; +} + +step "s1-delete" +{ + DELETE FROM reference_table WHERE id = 5; +} + +step "s1-ddl" +{ + CREATE INDEX reference_table_index ON reference_table(id); +} + +step "s1-copy" +{ + COPY reference_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; +} + +step "s1-commit" +{ + COMMIT; +} + +session "s2" + +step "s2-begin" +{ + BEGIN; +} + +step "s2-blocking-shard-split" +{ + SELECT pg_catalog.citus_split_shard_by_split_points( + 1500002, + ARRAY['-1073741824'], + ARRAY[1, 
2], + 'blocking'); +} + +step "s2-add-fkey" +{ + ALTER TABLE table_to_split ADD CONSTRAINT fkey_const FOREIGN KEY(value) REFERENCES reference_table(id); +} + +step "s2-commit" +{ + COMMIT; +} + +step "s2-print-cluster" +{ + -- row count per shard + SELECT + nodeport, shardid, success, result + FROM + run_command_on_placements('table_to_split', 'select count(*) from %s') + ORDER BY + nodeport, shardid; + + -- rows + SELECT id, value FROM table_to_split ORDER BY id, value; +} + +// Run shard split while concurrently performing DML and index creation on the +// reference table which the distributed table has a foreign key to. +// All modifications should block on shard split. +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-delete" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-update" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-insert" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-copy" "s2-commit" "s1-commit" "s2-print-cluster" +permutation "s2-add-fkey" "s1-begin" "s2-begin" "s2-blocking-shard-split" "s1-ddl" "s2-commit" "s1-commit" "s2-print-cluster" diff --git a/src/test/regress/spec/isolation_citus_dist_activity.spec b/src/test/regress/spec/isolation_citus_dist_activity.spec index b2e977dda..eb0ee2a5d 100644 --- a/src/test/regress/spec/isolation_citus_dist_activity.spec +++ b/src/test/regress/spec/isolation_citus_dist_activity.spec @@ -1,5 +1,8 @@ setup { + -- revert back to pg_isolation_test_session_is_blocked until the tests are fixed + SELECT citus_internal.restore_isolation_tester_func(); + CREATE OR REPLACE FUNCTION test_assign_global_pid() RETURNS void LANGUAGE C STRICT @@ -14,6 +17,9 @@ setup teardown { + -- replace pg_isolation_test_session_is_blocked so that next tests are run with Citus implementation + SELECT citus_internal.replace_isolation_tester_func(); + DROP TABLE test_table; } diff --git a/src/test/regress/spec/isolation_concurrent_dml.spec b/src/test/regress/spec/isolation_concurrent_dml.spec index 35f2fd7a4..937880671 100644 --- a/src/test/regress/spec/isolation_concurrent_dml.spec +++ b/src/test/regress/spec/isolation_concurrent_dml.spec @@ -1,7 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); CREATE TABLE test_concurrent_dml (test_id integer NOT NULL, data text); SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('test_concurrent_dml', 'test_id', 'hash', shard_count:=4); @@ -10,7 +8,6 @@ setup teardown { DROP TABLE IF EXISTS test_concurrent_dml CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_copy_vs_all_on_mx.spec b/src/test/regress/spec/isolation_copy_vs_all_on_mx.spec index 92fb26e73..47abce739 100644 --- a/src/test/regress/spec/isolation_copy_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_copy_vs_all_on_mx.spec @@ -12,7 +12,6 @@ setup teardown { DROP TABLE IF EXISTS copy_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_ddl_vs_all.spec b/src/test/regress/spec/isolation_ddl_vs_all.spec index 2d672f3c1..46a1a9872 100644 --- a/src/test/regress/spec/isolation_ddl_vs_all.spec +++ b/src/test/regress/spec/isolation_ddl_vs_all.spec
@@ -5,9 +5,6 @@ // create range distributed table to test behavior of DDL in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; CREATE TABLE ddl_hash(id integer, data text); SELECT create_distributed_table('ddl_hash', 'id'); @@ -17,8 +14,6 @@ setup teardown { DROP TABLE IF EXISTS ddl_hash CASCADE; - - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_delete_vs_all.spec b/src/test/regress/spec/isolation_delete_vs_all.spec index 2b612f0d1..b1ecb3a19 100644 --- a/src/test/regress/spec/isolation_delete_vs_all.spec +++ b/src/test/regress/spec/isolation_delete_vs_all.spec @@ -5,9 +5,6 @@ // create range distributed table to test behavior of DELETE in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; CREATE TABLE delete_hash(id integer, data text); SELECT create_distributed_table('delete_hash', 'id'); @@ -17,8 +14,6 @@ setup teardown { DROP TABLE IF EXISTS delete_hash CASCADE; - - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_dis2ref_foreign_keys_on_mx.spec b/src/test/regress/spec/isolation_dis2ref_foreign_keys_on_mx.spec index eb312fae3..3bfc5e6bd 100644 --- a/src/test/regress/spec/isolation_dis2ref_foreign_keys_on_mx.spec +++ b/src/test/regress/spec/isolation_dis2ref_foreign_keys_on_mx.spec @@ -15,7 +15,6 @@ setup teardown { DROP TABLE ref_table, dist_table; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_distributed_deadlock_detection.spec b/src/test/regress/spec/isolation_distributed_deadlock_detection.spec index 9676530bc..27e18ea1c 100644 --- a/src/test/regress/spec/isolation_distributed_deadlock_detection.spec +++ b/src/test/regress/spec/isolation_distributed_deadlock_detection.spec @@ -1,8 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - CREATE TABLE deadlock_detection_reference (user_id int UNIQUE, some_val int); SELECT create_reference_table('deadlock_detection_reference'); @@ -26,7 +23,6 @@ teardown DROP TABLE local_deadlock_table; DROP TABLE deadlock_detection_test_rep_2; DROP TABLE deadlock_detection_reference; - SELECT citus_internal.restore_isolation_tester_func(); SET citus.shard_replication_factor = 1; } diff --git a/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec b/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec index 032bf0d02..ba83a18b1 100644 --- a/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec +++ b/src/test/regress/spec/isolation_drop_alter_index_select_for_update_on_mx.spec @@ -12,7 +12,6 @@ setup teardown { DROP TABLE IF EXISTS dist_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_drop_vs_all.spec b/src/test/regress/spec/isolation_drop_vs_all.spec index c970567e0..dd625fa79 100644 --- a/src/test/regress/spec/isolation_drop_vs_all.spec +++ b/src/test/regress/spec/isolation_drop_vs_all.spec @@ -5,9 +5,6 @@ // create distributed table to test behavior of DROP in concurrent operations setup { - SELECT 
citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; CREATE SCHEMA drop_tests; CREATE TABLE drop_tests.drop_hash(id integer, data text); @@ -23,8 +20,6 @@ teardown { DROP TABLE IF EXISTS drop_tests.drop_hash, drop_tests_2.drop_hash_2 CASCADE; DROP SCHEMA IF EXISTS drop_tests, drop_tests_2 CASCADE; - - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_dump_global_wait_edges.spec b/src/test/regress/spec/isolation_dump_global_wait_edges.spec index d9328eddf..f6f41a6ce 100644 --- a/src/test/regress/spec/isolation_dump_global_wait_edges.spec +++ b/src/test/regress/spec/isolation_dump_global_wait_edges.spec @@ -1,7 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); CREATE TABLE distributed_table (x int primary key, y int); SELECT create_distributed_table('distributed_table', 'x'); INSERT INTO distributed_table VALUES (1,0); @@ -19,7 +17,6 @@ setup teardown { DROP TABLE distributed_table; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec b/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec index dfe1587be..03512f343 100644 --- a/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec +++ b/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec @@ -17,7 +17,6 @@ teardown { DROP TABLE ref_table; DROP TABLE tt1; - SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_remove_node('localhost', 57636); } diff --git a/src/test/regress/spec/isolation_global_pid.spec b/src/test/regress/spec/isolation_global_pid.spec index 690d1768a..f1a24cfd9 100644 --- a/src/test/regress/spec/isolation_global_pid.spec +++ b/src/test/regress/spec/isolation_global_pid.spec @@ -11,7 +11,6 @@ setup teardown { DROP TABLE dist_table; - SELECT citus_internal.restore_isolation_tester_func(); SELECT citus_remove_node('localhost', 57636); } diff --git a/src/test/regress/spec/isolation_hash_copy_vs_all.spec b/src/test/regress/spec/isolation_hash_copy_vs_all.spec index 58ec28f5c..ac3e015f5 100644 --- a/src/test/regress/spec/isolation_hash_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_hash_copy_vs_all.spec @@ -5,8 +5,6 @@ // create append distributed table to test behavior of COPY in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE hash_copy(id integer, data text, int_data int); SELECT create_distributed_table('hash_copy', 'id'); @@ -16,7 +14,6 @@ setup teardown { DROP TABLE IF EXISTS hash_copy CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_insert_select_conflict.spec b/src/test/regress/spec/isolation_insert_select_conflict.spec index a25f01712..cf548ff2d 100644 --- a/src/test/regress/spec/isolation_insert_select_conflict.spec +++ b/src/test/regress/spec/isolation_insert_select_conflict.spec @@ -1,7 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); CREATE TABLE target_table(col_1 int primary key, col_2 int); SELECT create_distributed_table('target_table','col_1'); INSERT 
INTO target_table VALUES(1,2),(2,3),(3,4),(4,5),(5,6); @@ -18,7 +16,6 @@ setup teardown { DROP TABLE target_table, target_table_2, source_table; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_insert_select_vs_all_on_mx.spec b/src/test/regress/spec/isolation_insert_select_vs_all_on_mx.spec index 82d8d8638..6481bd999 100644 --- a/src/test/regress/spec/isolation_insert_select_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_insert_select_vs_all_on_mx.spec @@ -12,7 +12,6 @@ setup teardown { DROP TABLE IF EXISTS dist_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_insert_vs_all_on_mx.spec b/src/test/regress/spec/isolation_insert_vs_all_on_mx.spec index 7367fecb6..0a61899e4 100644 --- a/src/test/regress/spec/isolation_insert_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_insert_vs_all_on_mx.spec @@ -12,7 +12,6 @@ setup teardown { DROP TABLE IF EXISTS insert_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } diff --git a/src/test/regress/spec/isolation_logical_replication_multi_shard_commands_on_mx.spec b/src/test/regress/spec/isolation_logical_replication_multi_shard_commands_on_mx.spec index 512eb3af7..24432a78b 100644 --- a/src/test/regress/spec/isolation_logical_replication_multi_shard_commands_on_mx.spec +++ b/src/test/regress/spec/isolation_logical_replication_multi_shard_commands_on_mx.spec @@ -17,8 +17,6 @@ teardown { DROP TABLE selected_shard; DROP TABLE logical_replicate_placement; - - SELECT citus_internal.restore_isolation_tester_func(); } diff --git a/src/test/regress/spec/isolation_logical_replication_single_shard_commands_on_mx.spec b/src/test/regress/spec/isolation_logical_replication_single_shard_commands_on_mx.spec index 5f2a9cc93..7f24dab91 100644 --- a/src/test/regress/spec/isolation_logical_replication_single_shard_commands_on_mx.spec +++ b/src/test/regress/spec/isolation_logical_replication_single_shard_commands_on_mx.spec @@ -15,8 +15,6 @@ teardown { DROP TABLE selected_shard; DROP TABLE logical_replicate_placement; - - SELECT citus_internal.restore_isolation_tester_func(); } diff --git a/src/test/regress/spec/isolation_master_update_node.spec b/src/test/regress/spec/isolation_master_update_node.spec index 5234db32b..a6358d760 100644 --- a/src/test/regress/spec/isolation_master_update_node.spec +++ b/src/test/regress/spec/isolation_master_update_node.spec @@ -1,5 +1,8 @@ setup { + -- revert back to pg_isolation_test_session_is_blocked until the tests are fixed + SELECT citus_internal.restore_isolation_tester_func(); + SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57638); @@ -9,6 +12,9 @@ setup teardown { + -- replace pg_isolation_test_session_is_blocked so that next tests are run with Citus implementation + SELECT citus_internal.replace_isolation_tester_func(); + DROP TABLE t1; -- remove the nodes again diff --git a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec index 64b376395..67c20a2b2 100644 --- a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec +++ b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec @@ -26,7 +26,6 @@ teardown DROP TABLE deadlock_detection_test; DROP TABLE t2; SET citus.shard_replication_factor = 1; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git 
a/src/test/regress/spec/isolation_modify_with_subquery_vs_dml.spec b/src/test/regress/spec/isolation_modify_with_subquery_vs_dml.spec index 89835dd30..ae5a08e0a 100644 --- a/src/test/regress/spec/isolation_modify_with_subquery_vs_dml.spec +++ b/src/test/regress/spec/isolation_modify_with_subquery_vs_dml.spec @@ -1,7 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor to 2; CREATE TABLE users_test_table(user_id int, value_1 int, value_2 int, value_3 int); SELECT create_distributed_table('users_test_table', 'user_id'); @@ -31,7 +29,6 @@ teardown DROP TABLE users_test_table; DROP TABLE events_test_table; SET citus.shard_replication_factor to 1; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_multi_shard_modify_vs_all.spec b/src/test/regress/spec/isolation_multi_shard_modify_vs_all.spec index 61ecd1a28..60216f27f 100644 --- a/src/test/regress/spec/isolation_multi_shard_modify_vs_all.spec +++ b/src/test/regress/spec/isolation_multi_shard_modify_vs_all.spec @@ -1,8 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor to 1; SET citus.shard_count to 32; SET citus.multi_shard_modify_mode to 'parallel'; @@ -34,7 +31,6 @@ teardown { DROP TABLE users_test_table; DROP TABLE events_test_table; - SELECT citus_internal.restore_isolation_tester_func(); SET citus.shard_count to 4; } diff --git a/src/test/regress/spec/isolation_multiuser_locking.spec b/src/test/regress/spec/isolation_multiuser_locking.spec index 177b0f778..5891ca622 100644 --- a/src/test/regress/spec/isolation_multiuser_locking.spec +++ b/src/test/regress/spec/isolation_multiuser_locking.spec @@ -1,9 +1,6 @@ setup { SET citus.max_cached_conns_per_worker to 0; - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; CREATE USER test_user_1; @@ -17,8 +14,6 @@ setup teardown { - SELECT citus_internal.restore_isolation_tester_func(); - BEGIN; DROP TABLE IF EXISTS test_table; DROP USER test_user_1, test_user_2; diff --git a/src/test/regress/spec/isolation_mx_common.include.spec b/src/test/regress/spec/isolation_mx_common.include.spec index ed70908ac..fec4b2b98 100644 --- a/src/test/regress/spec/isolation_mx_common.include.spec +++ b/src/test/regress/spec/isolation_mx_common.include.spec @@ -29,9 +29,6 @@ setup AS 'citus', $$stop_session_level_connection_to_node$$; RESET citus.enable_metadata_sync; - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - -- start_metadata_sync_to_node can not be run inside a transaction block -- following is a workaround to overcome that -- port numbers are hard coded at the moment diff --git a/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec b/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec index f2e566936..7de6d69cc 100644 --- a/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec +++ b/src/test/regress/spec/isolation_rebalancer_deferred_drop.spec @@ -23,9 +23,6 @@ setup LANGUAGE C STRICT VOLATILE AS 'citus', $$stop_session_level_connection_to_node$$; - SELECT citus_internal.replace_isolation_tester_func(); - SELECT 
citus_internal.refresh_isolation_tester_prepared_statement(); - CREATE OR REPLACE PROCEDURE isolation_cleanup_orphaned_shards() LANGUAGE C AS 'citus', $$isolation_cleanup_orphaned_shards$$; @@ -44,8 +41,6 @@ COMMENT ON PROCEDURE isolation_cleanup_orphaned_shards() teardown { - SELECT citus_internal.restore_isolation_tester_func(); - DROP TABLE selected_shard; DROP TABLE t1; } diff --git a/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec b/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec index d92ee5357..6137df058 100644 --- a/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec +++ b/src/test/regress/spec/isolation_ref2ref_foreign_keys.spec @@ -1,8 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - CREATE TABLE ref_table_1(id int PRIMARY KEY, value int); SELECT create_reference_table('ref_table_1'); @@ -20,7 +17,6 @@ setup teardown { DROP TABLE ref_table_1, ref_table_2, ref_table_3; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_ref2ref_foreign_keys_on_mx.spec b/src/test/regress/spec/isolation_ref2ref_foreign_keys_on_mx.spec index 2f7a0b74c..2d0fc9df0 100644 --- a/src/test/regress/spec/isolation_ref2ref_foreign_keys_on_mx.spec +++ b/src/test/regress/spec/isolation_ref2ref_foreign_keys_on_mx.spec @@ -19,7 +19,6 @@ setup teardown { DROP TABLE ref_table_1, ref_table_2, ref_table_3; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_ref_select_for_update_vs_all_on_mx.spec b/src/test/regress/spec/isolation_ref_select_for_update_vs_all_on_mx.spec index 8f7e89945..0adf1864a 100644 --- a/src/test/regress/spec/isolation_ref_select_for_update_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_ref_select_for_update_vs_all_on_mx.spec @@ -11,7 +11,6 @@ setup teardown { DROP TABLE IF EXISTS ref_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec b/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec index 6fc02d78e..d36e5c586 100644 --- a/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_ref_update_delete_upsert_vs_all_on_mx.spec @@ -11,7 +11,6 @@ setup teardown { DROP TABLE IF EXISTS ref_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_reference_copy_vs_all.spec b/src/test/regress/spec/isolation_reference_copy_vs_all.spec index e08223db2..07dfdb5f2 100644 --- a/src/test/regress/spec/isolation_reference_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_reference_copy_vs_all.spec @@ -5,8 +5,6 @@ // create append distributed table to test behavior of COPY in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE reference_copy(id integer, data text, int_data int); SELECT create_reference_table('reference_copy'); @@ -16,7 +14,6 @@ setup teardown { DROP TABLE IF EXISTS reference_copy CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_reference_on_mx.spec b/src/test/regress/spec/isolation_reference_on_mx.spec index 787928b4f..45e4bf905 
100644 --- a/src/test/regress/spec/isolation_reference_on_mx.spec +++ b/src/test/regress/spec/isolation_reference_on_mx.spec @@ -12,7 +12,6 @@ setup teardown { DROP TABLE ref_table; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_replace_wait_function.spec b/src/test/regress/spec/isolation_replace_wait_function.spec index 5790ef57f..bf7f8a03d 100644 --- a/src/test/regress/spec/isolation_replace_wait_function.spec +++ b/src/test/regress/spec/isolation_replace_wait_function.spec @@ -5,8 +5,6 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); select setval('pg_dist_shardid_seq', GREATEST(1400000, nextval('pg_dist_shardid_seq'))); CREATE TABLE test_locking (a int unique); @@ -16,7 +14,6 @@ setup teardown { DROP TABLE test_locking; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index 867e2bd3a..7a2a4d8dd 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -1,8 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - CREATE TABLE ref_table(a int primary key); SELECT create_reference_table('ref_table'); INSERT INTO ref_table VALUES (1), (3), (5), (7); @@ -13,7 +10,6 @@ setup teardown { - SELECT citus_internal.restore_isolation_tester_func(); DROP TABLE ref_table, dist_table; SELECT master_remove_node('localhost', 57636); } diff --git a/src/test/regress/spec/isolation_replicated_dist_on_mx.spec b/src/test/regress/spec/isolation_replicated_dist_on_mx.spec index b1e59d512..f3dd18816 100644 --- a/src/test/regress/spec/isolation_replicated_dist_on_mx.spec +++ b/src/test/regress/spec/isolation_replicated_dist_on_mx.spec @@ -25,7 +25,6 @@ setup teardown { DROP TABLE replicated_table, replicated_table_2, single_replicated_table; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_select_for_update.spec b/src/test/regress/spec/isolation_select_for_update.spec index 4dcd2f42c..d92884aed 100644 --- a/src/test/regress/spec/isolation_select_for_update.spec +++ b/src/test/regress/spec/isolation_select_for_update.spec @@ -1,9 +1,6 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor to 1; CREATE TABLE test_table_1_rf1(id int, val_1 int); @@ -28,8 +25,6 @@ teardown DROP TABLE test_table_1_rf1 CASCADE; DROP TABLE test_table_2_rf1; DROP TABLE ref_table; - - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_select_vs_all.spec b/src/test/regress/spec/isolation_select_vs_all.spec index 2aebcc7bb..741dd1809 100644 --- a/src/test/regress/spec/isolation_select_vs_all.spec +++ b/src/test/regress/spec/isolation_select_vs_all.spec @@ -5,9 +5,6 @@ // create range distributed table to test behavior of SELECT in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; SET 
citus.next_shard_id TO 6780300; CREATE TABLE select_append(id integer, data text, int_data int); @@ -19,7 +16,6 @@ setup teardown { DROP TABLE IF EXISTS select_append CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_select_vs_all_on_mx.spec b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec index 66201af7f..0bac9b42f 100644 --- a/src/test/regress/spec/isolation_select_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec @@ -12,7 +12,6 @@ setup teardown { DROP TABLE IF EXISTS select_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_setup.spec b/src/test/regress/spec/isolation_setup.spec new file mode 100644 index 000000000..0a69a2c9d --- /dev/null +++ b/src/test/regress/spec/isolation_setup.spec @@ -0,0 +1,15 @@ +session "s1" +step "setup" +{ + -- Replace pg_isolation_test_session_is_blocked so that all isolation tests are run with Citus implementation. + -- + -- Vanilla PG only checks for local blocks, whereas citus implementation also checks worker jobs in distributed + -- transactions. + -- + -- We have some tests that do not produce deterministic outputs when we use the Citus UDFs. They restore this + -- function in the setup phase and replace it again on the teardown phase so that the remainder of the tests can + -- keep using the Citus alternatives. Those tests should never be run concurrently with other isolation tests. + SELECT citus_internal.replace_isolation_tester_func(); +} + +permutation "setup" diff --git a/src/test/regress/spec/isolation_shard_rebalancer_progress.spec b/src/test/regress/spec/isolation_shard_rebalancer_progress.spec index d2248292d..9e7bcca25 100644 --- a/src/test/regress/spec/isolation_shard_rebalancer_progress.spec +++ b/src/test/regress/spec/isolation_shard_rebalancer_progress.spec @@ -1,7 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); select setval('pg_dist_shardid_seq', GREATEST(1500000, nextval('pg_dist_shardid_seq'))); SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; @@ -23,7 +21,6 @@ teardown { DROP TABLE colocated2; DROP TABLE colocated1; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_truncate_vs_all.spec b/src/test/regress/spec/isolation_truncate_vs_all.spec index dd8cb7c2c..e9fe6ba35 100644 --- a/src/test/regress/spec/isolation_truncate_vs_all.spec +++ b/src/test/regress/spec/isolation_truncate_vs_all.spec @@ -5,9 +5,6 @@ // create range distributed table to test behavior of TRUNCATE in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 5990340; CREATE TABLE truncate_append(id integer, data text); @@ -19,8 +16,6 @@ setup teardown { DROP TABLE IF EXISTS truncate_append CASCADE; - - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec b/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec index ac87c4b05..779170b00 100644 --- a/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_truncate_vs_all_on_mx.spec @@ -24,7 +24,6 @@ teardown DROP TABLE IF EXISTS 
referencing_table_2; DROP TABLE IF EXISTS referencing_table_1; DROP TABLE IF EXISTS truncate_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_undistribute_table.spec b/src/test/regress/spec/isolation_undistribute_table.spec index c3ae23ed5..cd08364a2 100644 --- a/src/test/regress/spec/isolation_undistribute_table.spec +++ b/src/test/regress/spec/isolation_undistribute_table.spec @@ -1,8 +1,5 @@ setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - CREATE TABLE dist_table(a INT, b INT); SELECT create_distributed_table('dist_table', 'a'); INSERT INTO dist_table VALUES (1, 2), (3, 4), (5, 6); @@ -11,7 +8,6 @@ setup teardown { DROP TABLE IF EXISTS dist_table; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_update_delete_upsert_vs_all_on_mx.spec b/src/test/regress/spec/isolation_update_delete_upsert_vs_all_on_mx.spec index b91b50390..4d8aa5330 100644 --- a/src/test/regress/spec/isolation_update_delete_upsert_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_update_delete_upsert_vs_all_on_mx.spec @@ -11,7 +11,6 @@ setup teardown { DROP TABLE IF EXISTS dist_table CASCADE; - SELECT citus_internal.restore_isolation_tester_func(); } session "s1" diff --git a/src/test/regress/spec/isolation_update_node.spec b/src/test/regress/spec/isolation_update_node.spec index 5ae6fcaa6..d6be6bfdc 100644 --- a/src/test/regress/spec/isolation_update_node.spec +++ b/src/test/regress/spec/isolation_update_node.spec @@ -1,5 +1,8 @@ setup { + -- revert back to pg_isolation_test_session_is_blocked until the tests are fixed + SELECT citus_internal.restore_isolation_tester_func(); + SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57638); @@ -8,6 +11,9 @@ setup teardown { + -- replace pg_isolation_test_session_is_blocked so that next tests are run with Citus implementation + SELECT citus_internal.replace_isolation_tester_func(); + DROP TABLE IF EXISTS test; SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; SELECT nodeid, nodename, nodeport from pg_dist_node ORDER BY 1 DESC; diff --git a/src/test/regress/spec/isolation_update_vs_all.spec b/src/test/regress/spec/isolation_update_vs_all.spec index 16da0aa63..1d249e0d4 100644 --- a/src/test/regress/spec/isolation_update_vs_all.spec +++ b/src/test/regress/spec/isolation_update_vs_all.spec @@ -5,9 +5,6 @@ // create range distributed table to test behavior of UPDATE in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; CREATE TABLE update_hash(id integer, data text); SELECT create_distributed_table('update_hash', 'id'); @@ -17,8 +14,6 @@ setup teardown { DROP TABLE IF EXISTS update_hash CASCADE; - - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_upsert_vs_all.spec b/src/test/regress/spec/isolation_upsert_vs_all.spec index b8cc5cf23..a6c5fc339 100644 --- a/src/test/regress/spec/isolation_upsert_vs_all.spec +++ b/src/test/regress/spec/isolation_upsert_vs_all.spec @@ -5,9 +5,6 @@ // create range distributed table to test behavior of UPSERT in concurrent operations setup { - SELECT citus_internal.replace_isolation_tester_func(); - SELECT 
citus_internal.refresh_isolation_tester_prepared_statement(); - SET citus.shard_replication_factor TO 1; CREATE TABLE upsert_hash(id integer PRIMARY KEY, data text); SELECT create_distributed_table('upsert_hash', 'id'); @@ -17,8 +14,6 @@ setup teardown { DROP TABLE IF EXISTS upsert_hash CASCADE; - - SELECT citus_internal.restore_isolation_tester_func(); } // session 1 diff --git a/src/test/regress/spec/isolation_vacuum_skip_locked.spec b/src/test/regress/spec/isolation_vacuum_skip_locked.spec new file mode 100644 index 000000000..4f1c2d3e2 --- /dev/null +++ b/src/test/regress/spec/isolation_vacuum_skip_locked.spec @@ -0,0 +1,61 @@ +// Test for SKIP_LOCKED option of VACUUM and ANALYZE commands. +# +// This also verifies that log messages are not emitted for skipped relations +// that were not specified in the VACUUM or ANALYZE command. + +setup +{ + CREATE TABLE parted (a INT) PARTITION BY LIST (a); + CREATE TABLE part1 PARTITION OF parted FOR VALUES IN (1); + ALTER TABLE part1 SET (autovacuum_enabled = false); + CREATE TABLE part2 PARTITION OF parted FOR VALUES IN (2); + ALTER TABLE part2 SET (autovacuum_enabled = false); +} + +teardown +{ + DROP TABLE IF EXISTS parted; +} + +session s1 +step lock_share +{ + BEGIN; + LOCK part1 IN SHARE MODE; +} +step lock_access_exclusive +{ + BEGIN; + LOCK part1 IN ACCESS EXCLUSIVE MODE; +} +step commit +{ + COMMIT; +} + +session s2 +step vac_specified { VACUUM (SKIP_LOCKED) part1, part2; } +step vac_all_parts { VACUUM (SKIP_LOCKED) parted; } +step analyze_specified { ANALYZE (SKIP_LOCKED) part1, part2; } +step analyze_all_parts { ANALYZE (SKIP_LOCKED) parted; } +step vac_analyze_specified { VACUUM (ANALYZE, SKIP_LOCKED) part1, part2; } +step vac_analyze_all_parts { VACUUM (ANALYZE, SKIP_LOCKED) parted; } +step vac_full_specified { VACUUM (SKIP_LOCKED, FULL) part1, part2; } +step vac_full_all_parts { VACUUM (SKIP_LOCKED, FULL) parted; } + +permutation lock_share vac_specified commit +permutation lock_share vac_all_parts commit +permutation lock_share analyze_specified commit +permutation lock_share analyze_all_parts commit +permutation lock_share vac_analyze_specified commit +permutation lock_share vac_analyze_all_parts commit +permutation lock_share vac_full_specified commit +permutation lock_share vac_full_all_parts commit +permutation lock_access_exclusive vac_specified commit +permutation lock_access_exclusive vac_all_parts commit +permutation lock_access_exclusive analyze_specified commit +permutation lock_access_exclusive analyze_all_parts commit +permutation lock_access_exclusive vac_analyze_specified commit +permutation lock_access_exclusive vac_analyze_all_parts commit +permutation lock_access_exclusive vac_full_specified commit +permutation lock_access_exclusive vac_full_all_parts commit diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule index ff8bd7d94..871a62031 100644 --- a/src/test/regress/split_schedule +++ b/src/test/regress/split_schedule @@ -4,10 +4,13 @@ test: multi_test_helpers multi_test_helpers_superuser columnar_test_helpers test: multi_cluster_management test: multi_test_catalog_views test: tablespace +# Helpers for foreign key catalogs. +test: foreign_key_to_reference_table # Split tests go here. 
-#test: worker_split_binary_copy_test -#test: worker_split_text_copy_test -#test: citus_split_shard_by_split_points_negative -test: citus_sameer -#test: citus_split_shard_by_split_points +#test: citus_sameer #test: split_shard_replication_setup +test: worker_split_copy_test +test: worker_split_binary_copy_test +test: worker_split_text_copy_test +test: citus_split_shard_by_split_points_negative +test: citus_split_shard_by_split_points diff --git a/src/test/regress/sql/citus_local_tables_mx.sql b/src/test/regress/sql/citus_local_tables_mx.sql index ac3f6e4d4..c81cac2e6 100644 --- a/src/test/regress/sql/citus_local_tables_mx.sql +++ b/src/test/regress/sql/citus_local_tables_mx.sql @@ -453,7 +453,17 @@ CREATE VIEW v100 AS SELECT * FROM loc_tb; CREATE VIEW v101 AS SELECT * FROM loc_tb JOIN ref_tb USING (a); CREATE VIEW v102 AS SELECT * FROM v101; +-- a regular matview that depends on a local table +CREATE MATERIALIZED VIEW matview_101 AS SELECT * from loc_tb; +-- a matview and a view that depend on the local table + each other +CREATE VIEW v103 AS SELECT * from loc_tb; +CREATE MATERIALIZED VIEW matview_102 AS SELECT * from loc_tb JOIN v103 USING (a); +CREATE OR REPLACE VIEW v103 AS SELECT * from loc_tb JOIN matview_102 USING (a); + +SET client_min_messages TO DEBUG1; +-- auto undistribute ALTER TABLE loc_tb ADD CONSTRAINT fkey FOREIGN KEY (a) references ref_tb(a); +SET client_min_messages TO WARNING; -- works fine select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100, citus_local_tables_mx.v101, citus_local_tables_mx.v102$$); @@ -464,6 +474,71 @@ select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v100$ select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v101$$); select run_command_on_workers($$SELECT count(*) from citus_local_tables_mx.v102$$); +INSERT INTO loc_tb VALUES (1), (2); +-- test a matview with columnar +CREATE MATERIALIZED VIEW matview_columnar USING COLUMNAR AS SELECT * FROM loc_tb WITH DATA; + +-- can't recreate matviews because the size limit is set to zero by the GUC +SET citus.max_matview_size_to_auto_recreate TO 0; +SELECT citus_add_local_table_to_metadata('loc_tb', true); +-- remove the limit +SET citus.max_matview_size_to_auto_recreate TO -1; +SELECT citus_add_local_table_to_metadata('loc_tb', true); + +-- test REFRESH MAT VIEW +SELECT * FROM matview_101 ORDER BY a; +REFRESH MATERIALIZED VIEW matview_101; +SELECT * FROM matview_101 ORDER BY a; + +-- verify columnar matview works on a table added to metadata +SELECT * FROM matview_columnar; +REFRESH MATERIALIZED VIEW matview_columnar; +SELECT * FROM matview_columnar ORDER BY a; + +-- test with partitioned tables +SET citus.use_citus_managed_tables TO ON; +CREATE TABLE parent_1 (a INT UNIQUE) PARTITION BY RANGE(a); +SET citus.use_citus_managed_tables TO OFF; + +CREATE MATERIALIZED VIEW part_matview1 as SELECT count(*) FROM parent_1 JOIN parent_1 p2 ON (true); +CREATE MATERIALIZED VIEW part_matview2 as SELECT count(*) FROM parent_1 JOIN part_matview1 on (true); + +SELECT count(*) FROM citus_local_tables_mx.part_matview1 JOIN citus_local_tables_mx.part_matview2 ON (true); + +CREATE TABLE parent_1_child_1 (a int); +CREATE TABLE parent_1_child_2 (a int); + +-- create matviews on partition tables +CREATE MATERIALIZED VIEW mv1 AS SELECT * FROM parent_1_child_1; +CREATE MATERIALIZED VIEW mv2 AS SELECT * FROM parent_1_child_2; +CREATE MATERIALIZED VIEW mv3 AS SELECT parent_1_child_2.* FROM parent_1_child_2 JOIN parent_1_child_1 USING(a); +CREATE MATERIALIZED VIEW 
mv4 AS SELECT * FROM mv3; + +alter table parent_1 attach partition parent_1_child_1 FOR VALUES FROM (0) TO (10) ; + +-- all matviews work +SELECT count(*) FROM citus_local_tables_mx.mv1; +SELECT count(*) FROM citus_local_tables_mx.mv2; +SELECT count(*) FROM citus_local_tables_mx.mv3; +SELECT count(*) FROM citus_local_tables_mx.mv4; + +-- recreate matviews and verify they still work +alter table parent_1 attach partition parent_1_child_2 FOR VALUES FROM (10) TO (20); + +SELECT count(*) FROM citus_local_tables_mx.mv1; +SELECT count(*) FROM citus_local_tables_mx.mv2; +SELECT count(*) FROM citus_local_tables_mx.mv3; +SELECT count(*) FROM citus_local_tables_mx.mv4; + +-- verify matviews work after undistributing +SELECT undistribute_table('parent_1'); +SELECT count(*) FROM citus_local_tables_mx.mv1; +SELECT count(*) FROM citus_local_tables_mx.mv2; +SELECT count(*) FROM citus_local_tables_mx.mv3; +SELECT count(*) FROM citus_local_tables_mx.mv4; + +-- todo: add more matview tests once 5968 and 6028 are fixed + -- cleanup at exit set client_min_messages to error; DROP SCHEMA citus_local_tables_mx CASCADE; diff --git a/src/test/regress/sql/citus_split_shard_by_split_points.sql b/src/test/regress/sql/citus_split_shard_by_split_points.sql index a66830452..8d618c3c5 100644 --- a/src/test/regress/sql/citus_split_shard_by_split_points.sql +++ b/src/test/regress/sql/citus_split_shard_by_split_points.sql @@ -1,123 +1,235 @@ --- Split Shards by Split Points tests. +/* +Citus Shard Split Test. The test is modeled on 'shard_move_constraints'. +Here is a high-level overview of the test plan: + 1. Create a table 'sensors' (ShardCount = 2) to be split. Add indexes and statistics on this table. + 2. Create two other tables: 'reference_table' and 'colocated_dist_table', co-located with sensors. + 3. Create Foreign key constraints between the two co-located distributed tables. + 4. Load data into the three tables. + 5. Move one of the shards for 'sensors' to test ShardMove -> Split. + 6. Trigger Split on both shards of 'sensors'. This will also split co-located tables. + 7. Move one of the split shards to test Split -> ShardMove. + 8. Split an already-split shard a second time on a different schema. +*/ --- Setup for Test. -CREATE SCHEMA citus_split_shard_by_split_points; -SET search_path TO citus_split_shard_by_split_points; -SET citus.shard_count TO 1; +CREATE SCHEMA "citus_split_test_schema"; +SET search_path TO "citus_split_test_schema"; +SET citus.next_shard_id TO 8981000; +SET citus.next_placement_id TO 8610000; +SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 1; -SET citus.next_shard_id TO 82060000; --- Create distributed table 'lineitem_streaming' -CREATE TABLE lineitem_streaming ( - l_orderkey bigint not null, - l_partkey integer not null, - l_suppkey integer not null, - l_linenumber integer not null, - l_quantity decimal(15, 2) not null, - l_extendedprice decimal(15, 2) not null, - l_discount decimal(15, 2) not null, - l_tax decimal(15, 2) not null, - l_returnflag char(1) not null, - l_linestatus char(1) not null, - l_shipdate date not null, - l_commitdate date not null, - l_receiptdate date not null, - l_shipinstruct char(25) not null, - l_shipmode char(10) not null, - l_comment varchar(44) not null); -SELECT create_distributed_table('lineitem_streaming', 'l_orderkey'); +-- BEGIN: Create table to split, along with other co-located tables. Add indexes, statistics etc. 
+CREATE TABLE sensors( + measureid integer, + eventdatetime date, + measure_data jsonb, + meaure_quantity decimal(15, 2), + measure_status char(1), + measure_comment varchar(44), + PRIMARY KEY (measureid, eventdatetime, measure_data)); +CREATE INDEX index_on_sensors ON sensors(lower(measureid::text)); +ALTER INDEX index_on_sensors ALTER COLUMN 1 SET STATISTICS 1000; +CREATE INDEX hash_index_on_sensors ON sensors USING HASH((measure_data->'IsFailed')); +CREATE INDEX index_with_include_on_sensors ON sensors ((measure_data->'IsFailed')) INCLUDE (measure_data, eventdatetime, measure_status); +CREATE STATISTICS stats_on_sensors (dependencies) ON measureid, eventdatetime FROM sensors; --- Create distributed table 'orders_streaming' -CREATE TABLE orders_streaming ( - o_orderkey bigint not null primary key, - o_custkey integer not null, - o_orderstatus char(1) not null, - o_totalprice decimal(15,2) not null, - o_orderdate date not null, - o_orderpriority char(15) not null, - o_clerk char(15) not null, - o_shippriority integer not null, - o_comment varchar(79) not null); -SELECT create_distributed_table('orders_streaming', 'o_orderkey'); +SELECT create_distributed_table('sensors', 'measureid', colocate_with:='none'); +-- END: Create table to split, along with other co-located tables. Add indexes, statistics etc. --- Insert data into 'lineitem_streaming' -\COPY lineitem_streaming FROM STDIN WITH DELIMITER '|' -99|87114|4639|1|10|11011.10|0.02|0.01|A|F|1994-05-18|1994-06-03|1994-05-23|COLLECT COD|RAIL|kages. requ -99|123766|3767|2|5|8948.80|0.02|0.07|R|F|1994-05-06|1994-05-28|1994-05-20|TAKE BACK RETURN|RAIL|ests cajole fluffily waters. blithe -99|134082|1622|3|42|46875.36|0.02|0.02|A|F|1994-04-19|1994-05-18|1994-04-20|NONE|RAIL|kages are fluffily furiously ir -99|108338|849|4|36|48467.88|0.09|0.02|A|F|1994-07-04|1994-04-17|1994-07-30|DELIVER IN PERSON|AIR|slyly. slyly e -100|62029|2030|1|28|27748.56|0.04|0.05|N|O|1998-05-08|1998-05-13|1998-06-07|COLLECT COD|TRUCK|sts haggle. slowl -100|115979|8491|2|22|43889.34|0.00|0.07|N|O|1998-06-24|1998-04-12|1998-06-29|DELIVER IN PERSON|SHIP|nto beans alongside of the fi -100|46150|8655|3|46|50422.90|0.03|0.04|N|O|1998-05-02|1998-04-10|1998-05-22|TAKE BACK RETURN|SHIP|ular accounts. even -100|38024|3031|4|14|13468.28|0.06|0.03|N|O|1998-05-22|1998-05-01|1998-06-03|COLLECT COD|MAIL|y. furiously ironic ideas gr -100|53439|955|5|37|51519.91|0.05|0.00|N|O|1998-03-06|1998-04-16|1998-03-31|TAKE BACK RETURN|TRUCK|nd the quickly s -101|118282|5816|1|49|63713.72|0.10|0.00|N|O|1996-06-21|1996-05-27|1996-06-29|DELIVER IN PERSON|REG AIR|ts -101|163334|883|2|36|50303.88|0.00|0.01|N|O|1996-05-19|1996-05-01|1996-06-04|DELIVER IN PERSON|AIR|tes. blithely pending dolphins x-ray f -101|138418|5958|3|12|17476.92|0.06|0.02|N|O|1996-03-29|1996-04-20|1996-04-12|COLLECT COD|MAIL|. quickly regular -102|88914|3931|1|37|70407.67|0.06|0.00|N|O|1997-07-24|1997-08-02|1997-08-07|TAKE BACK RETURN|SHIP|ully across the ideas. final deposit -102|169238|6787|2|34|44445.82|0.03|0.08|N|O|1997-08-09|1997-07-28|1997-08-26|TAKE BACK RETURN|SHIP|eposits cajole across -102|182321|4840|3|25|35083.00|0.01|0.01|N|O|1997-07-31|1997-07-24|1997-08-17|NONE|RAIL|bits. ironic accoun -102|61158|8677|4|15|16787.25|0.07|0.07|N|O|1997-06-02|1997-07-13|1997-06-04|DELIVER IN PERSON|SHIP|final packages. carefully even excu -103|194658|2216|1|6|10515.90|0.03|0.05|N|O|1996-10-11|1996-07-25|1996-10-28|NONE|FOB|cajole. 
carefully ex -103|10426|2928|2|37|49447.54|0.02|0.07|N|O|1996-09-17|1996-07-27|1996-09-20|TAKE BACK RETURN|MAIL|ies. quickly ironic requests use blithely -103|28431|8432|3|23|31266.89|0.01|0.04|N|O|1996-09-11|1996-09-18|1996-09-26|NONE|FOB|ironic accou -103|29022|4027|4|32|30432.64|0.01|0.07|N|O|1996-07-30|1996-08-06|1996-08-04|NONE|RAIL|kages doze. special, regular deposit --1995148554|112942|2943|1|9|17594.46|0.04|0.04|N|O|1996-08-03|1996-05-31|1996-08-04|DELIVER IN PERSON|TRUCK|c realms print carefully car --1686493264|15110|113|5|2|2050.22|0.03|0.08|R|F|1994-04-26|1994-03-15|1994-05-15|TAKE BACK RETURN|MAIL|e final, regular requests. carefully -\. +-- BEGIN: Create co-located distributed and reference tables. +CREATE TABLE reference_table (measureid integer PRIMARY KEY); +SELECT create_reference_table('reference_table'); --- Insert data into 'orders_streaming' -\COPY orders_streaming FROM STDIN WITH DELIMITER '|' -99|890|F|108594.87|1994-03-13|4-NOT SPECIFIED|Clerk#000000973|0|e carefully ironic packages. pending -100|1471|O|198978.27|1998-02-28|4-NOT SPECIFIED|Clerk#000000577|0|heodolites detect slyly alongside of the ent -101|280|O|118448.39|1996-03-17|3-MEDIUM|Clerk#000000419|0|ding accounts above the slyly final asymptote -102|8|O|184806.58|1997-05-09|2-HIGH|Clerk#000000596|0| slyly according to the asymptotes. carefully final packages integrate furious -103|292|O|118745.16|1996-06-20|4-NOT SPECIFIED|Clerk#000000090|0|ges. carefully unusual instructions haggle quickly regular f --1995148554|142|O|3553.15|1995-05-08|3-MEDIUM|Clerk#000000378|0|nts hinder fluffily ironic instructions. express, express excuses --1686493264|878|O|177809.13|1997-09-05|3-MEDIUM|Clerk#000000379|0|y final packages. final foxes since the quickly even -\. +CREATE TABLE colocated_dist_table (measureid integer PRIMARY KEY); +CLUSTER colocated_dist_table USING colocated_dist_table_pkey; +SELECT create_distributed_table('colocated_dist_table', 'measureid', colocate_with:='sensors'); --- Initial Row Count in Shard -SELECT COUNT(*) FROM orders_streaming; -SELECT COUNT(*) FROM lineitem_streaming; +CREATE TABLE table_with_index_rep_identity(key int NOT NULL); +CREATE UNIQUE INDEX uqx ON table_with_index_rep_identity(key); +ALTER TABLE table_with_index_rep_identity REPLICA IDENTITY USING INDEX uqx; +CLUSTER table_with_index_rep_identity USING uqx; +SELECT create_distributed_table('table_with_index_rep_identity', 'key', colocate_with:='sensors'); +-- END: Create co-located distributed and reference tables. --- Create Foreign constraint between two shards. -ALTER TABLE lineitem_streaming ADD CONSTRAINT test_constraint - FOREIGN KEY(l_orderkey) REFERENCES orders_streaming(o_orderkey); +-- BEGIN : Create Foreign key constraints. +ALTER TABLE sensors ADD CONSTRAINT fkey_table_to_dist FOREIGN KEY (measureid) REFERENCES colocated_dist_table(measureid); +-- END : Create Foreign key constraints. --- Before Split, List shard and placement data. -SELECT shard.shardid, logicalrelid, shardstorage, shardminvalue, shardmaxvalue nodename, nodeport, placementid +-- BEGIN : Load data into tables. +INSERT INTO reference_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO colocated_dist_table SELECT i FROM generate_series(0,1000)i; +INSERT INTO sensors SELECT i, '2020-01-05', '{}', 11011.10, 'A', 'I <3 Citus' FROM generate_series(0,1000)i; + +SELECT COUNT(*) FROM sensors; +SELECT COUNT(*) FROM reference_table; +SELECT COUNT(*) FROM colocated_dist_table; +-- END: Load data into tables. + +-- BEGIN : Display current state. 
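As a purely hypothetical aside (not part of the patch): the shard/placement listing that is repeated several times in this test could be wrapped in a helper, which is one possible answer to the TODO that immediately follows. The function name and column list below are illustrative assumptions only, and the sketch assumes the search_path includes citus_split_test_schema:

CREATE OR REPLACE FUNCTION show_split_placements()
RETURNS TABLE (shardid bigint, logicalrelid regclass,
               shardminvalue text, shardmaxvalue text,
               nodename text, nodeport integer)
LANGUAGE sql AS $$
    -- same query shape as the listings used in this test
    -- (the pg_catalog.pg_class join is omitted here for brevity)
    SELECT shard.shardid, shard.logicalrelid,
           shard.shardminvalue, shard.shardmaxvalue,
           node.nodename, node.nodeport
    FROM pg_dist_shard AS shard
    INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid
    INNER JOIN pg_dist_node node ON placement.groupid = node.groupid
    WHERE node.noderole = 'primary'
      AND shard.logicalrelid IN ('sensors'::regclass,
                                 'colocated_dist_table'::regclass,
                                 'table_with_index_rep_identity'::regclass)
    ORDER BY shard.logicalrelid, shard.shardminvalue::bigint;
$$;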
+-- TODO(niupre): Can we refactor this to be a function? +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - WHERE node.noderole = 'primary' AND (logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass) + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) ORDER BY logicalrelid, shardminvalue::BIGINT; --- Trigger five way way Split on Shard. +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; + +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; +-- END : Display current state + +-- BEGIN : Move one shard before we split it. +\c - postgres - :master_port +SET search_path TO "citus_split_test_schema"; +SET citus.next_shard_id TO 8981007; +SET citus.defer_drop_after_shard_move TO OFF; + +SELECT citus_move_shard_placement(8981000, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='force_logical'); +-- END : Move one shard before we split it. + +-- BEGIN : Set node id variables SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END : Set node id variables -SELECT * FROM citus_split_shard_by_split_points( - 82060000, - ARRAY['268435455', '536870911', '1073741823', '1610612735'], - ARRAY[:worker_1_node, :worker_1_node, :worker_2_node, :worker_2_node, :worker_2_node], +-- BEGIN : Split two shards : One with move and One without move. 
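As an aside (not part of the patch), the split points used just below line up with the int32 hash space Citus distributes over: with citus.shard_count = 2, shard 8981000 covers the lower half of the range and shard 8981001 the upper half, so -1073741824 is the midpoint of the lower shard and 536870911 / 1610612735 are the 1/4 and 3/4 points of the upper shard. A throwaway query reproduces the values; it is illustrative only:

SELECT (-2147483648)::bigint / 2    AS midpoint_of_lower_shard,     -- -1073741824
       2147483647 / 4               AS first_split_of_upper_shard,  -- 536870911
       (2147483647::bigint * 3) / 4 AS second_split_of_upper_shard; -- 1610612735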
+-- Perform 2 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981000, + ARRAY['-1073741824'], + ARRAY[:worker_1_node, :worker_2_node], 'blocking'); - -- Row Count in Shard after Split (should be same as before) -SELECT COUNT(*) FROM orders_streaming; -SELECT COUNT(*) FROM lineitem_streaming; +-- Perform 3 way split +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981001, + ARRAY['536870911', '1610612735'], + ARRAY[:worker_1_node, :worker_1_node, :worker_2_node], + 'blocking'); +-- END : Split two shards : One with move and One without move. --- After Split, List shard and placement data. -SELECT shard.shardid, logicalrelid, shardstorage, shardminvalue, shardmaxvalue nodename, nodeport, placementid +-- BEGIN : Move a shard post split. +SELECT citus_move_shard_placement(8981007, 'localhost', :worker_1_port, 'localhost', :worker_2_port, shard_transfer_mode:='block_writes'); +-- END : Move a shard post split. + +-- BEGIN : Display current state. +-- TODO(niupre): Can we refactor this to be a function? +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport FROM pg_dist_shard AS shard INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid INNER JOIN pg_dist_node node ON placement.groupid = node.groupid - WHERE node.noderole = 'primary' AND (logicalrelid = 'lineitem_streaming'::regclass OR logicalrelid = 'orders_streaming'::regclass) + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) ORDER BY logicalrelid, shardminvalue::BIGINT; --- Cleanup for Test. -\c - - - :master_port -SET client_min_messages TO WARNING; -DROP SCHEMA citus_split_shard_by_split_points_blocking CASCADE; +\c - - - :worker_1_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; +\c - - - :worker_2_port + SET search_path TO "citus_split_test_schema", public, pg_catalog; + SET citus.show_shards_for_app_name_prefixes = '*'; + SELECT tbl.relname, fk."Constraint", fk."Definition" + FROM pg_catalog.pg_class tbl + JOIN public.table_fkeys fk on tbl.oid = fk.relid + WHERE tbl.relname like 'sensors_%' + ORDER BY 1, 2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'sensors_%' ORDER BY 1,2; + SELECT tablename, indexdef FROM pg_indexes WHERE tablename like 'table_with_index_rep_identity_%' ORDER BY 1,2; + SELECT stxname FROM pg_statistic_ext + WHERE stxnamespace IN ( + SELECT oid + FROM pg_namespace + WHERE nspname IN ('citus_split_test_schema') + ) + ORDER BY stxname ASC; +-- END : Display current state + +-- BEGIN: Should be able to change/drop constraints +\c - postgres - :master_port +SET search_path TO "citus_split_test_schema"; +ALTER INDEX index_on_sensors RENAME TO index_on_sensors_renamed; +ALTER INDEX 
index_on_sensors_renamed ALTER COLUMN 1 SET STATISTICS 200; +DROP STATISTICS stats_on_sensors; +DROP INDEX index_on_sensors_renamed; +ALTER TABLE sensors DROP CONSTRAINT fkey_table_to_dist; +-- END: Should be able to change/drop constraints + +-- BEGIN: Split a second time on another schema +SET search_path TO public; +SET citus.next_shard_id TO 8981031; +SELECT pg_catalog.citus_split_shard_by_split_points( + 8981007, + ARRAY['-2100000000'], + ARRAY[:worker_1_node, :worker_2_node], + 'blocking'); + +SET search_path TO "citus_split_test_schema"; +SELECT shard.shardid, logicalrelid, shardminvalue, shardmaxvalue, nodename, nodeport + FROM pg_dist_shard AS shard + INNER JOIN pg_dist_placement placement ON shard.shardid = placement.shardid + INNER JOIN pg_dist_node node ON placement.groupid = node.groupid + INNER JOIN pg_catalog.pg_class cls ON shard.logicalrelid = cls.oid + WHERE node.noderole = 'primary' AND (logicalrelid = 'sensors'::regclass OR logicalrelid = 'colocated_dist_table'::regclass OR logicalrelid = 'table_with_index_rep_identity'::regclass) + ORDER BY logicalrelid, shardminvalue::BIGINT; +-- END: Split a second time on another schema + +-- BEGIN: Validate Data Count +SELECT COUNT(*) FROM sensors; +SELECT COUNT(*) FROM reference_table; +SELECT COUNT(*) FROM colocated_dist_table; +-- END: Validate Data Count + +-- BEGIN: Cleanup +\c - postgres - :master_port +DROP SCHEMA "citus_split_test_schema" CASCADE; +-- END: Cleanup diff --git a/src/test/regress/sql/index_create.sql b/src/test/regress/sql/index_create.sql index 34ceb02a1..9e0c0805e 100644 --- a/src/test/regress/sql/index_create.sql +++ b/src/test/regress/sql/index_create.sql @@ -39,3 +39,53 @@ ALTER INDEX idx2 ALTER COLUMN 1 SET STATISTICS 1000; REINDEX INDEX idx1; ALTER TABLE test_tbl REPLICA IDENTITY USING INDEX a_index; + +-- PostgreSQL allows ALTER INDEX ... RENAME on a table, and so does Citus; +-- likewise, ALTER TABLE ... RENAME also works on an index. 
+CREATE TABLE alter_idx_rename_test (a INT); +CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a); +CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a); +CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a); +BEGIN; +-- rename index/table with weird syntax +ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2; +ALTER TABLE alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2; +ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2; +ALTER TABLE alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2; + +-- also, rename index/table with proper syntax +ALTER INDEX alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3; +ALTER TABLE alter_idx_rename_test_2 RENAME TO alter_idx_rename_test_3; +ALTER INDEX alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3; +ALTER TABLE alter_idx_rename_test_parted_2 RENAME TO alter_idx_rename_test_parted_3; + +SELECT 'alter_idx_rename_test_3'::regclass, 'alter_idx_rename_test_idx_3'::regclass; +SELECT 'alter_idx_rename_test_parted_3'::regclass, 'alter_idx_rename_test_parted_idx_3'::regclass; + +ROLLBACK; + +-- now, on distributed tables +SELECT create_distributed_table('alter_idx_rename_test', 'a'); +SELECT create_distributed_table('alter_idx_rename_test_parted', 'a'); + +-- rename index/table with weird syntax +ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2; +ALTER TABLE alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2; +ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2; +ALTER TABLE alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2; + +-- also, rename index/table with proper syntax +ALTER INDEX alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3; +ALTER TABLE alter_idx_rename_test_2 RENAME TO alter_idx_rename_test_3; +ALTER INDEX alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3; +ALTER TABLE alter_idx_rename_test_parted_2 RENAME TO alter_idx_rename_test_parted_3; + +SELECT 'alter_idx_rename_test_3'::regclass, 'alter_idx_rename_test_idx_3'::regclass; +SELECT 'alter_idx_rename_test_parted_3'::regclass, 'alter_idx_rename_test_parted_idx_3'::regclass; + +ALTER INDEX alter_idx_rename_test_idx_3 RENAME TO alter_idx_rename_test_idx_4; +DROP INDEX alter_idx_rename_test_idx_4; +DROP TABLE alter_idx_rename_test_3; + +DROP INDEX alter_idx_rename_test_parted_idx_3; +DROP TABLE alter_idx_rename_test_parted_3; diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 1366f4fb4..0ae3aa9e0 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -459,24 +459,36 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '11.0-1'; SELECT * FROM multi_extension.print_extension_changes(); +-- Test downgrade to 11.0-1 from 11.0-2 +ALTER EXTENSION citus UPDATE TO '11.0-2'; +ALTER EXTENSION citus UPDATE TO '11.0-1'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + -- Snapshot of state at 11.0-2 ALTER EXTENSION citus UPDATE TO '11.0-2'; SELECT * FROM multi_extension.print_extension_changes(); --- Test downgrade script (result should be empty) -ALTER EXTENSION citus UPDATE TO '11.0-1'; +-- Test downgrade to 11.0-2 from 11.0-3 +ALTER EXTENSION citus UPDATE TO '11.0-3'; 
ALTER EXTENSION citus UPDATE TO '11.0-2'; +-- Should be empty result since upgrade+downgrade should be a no-op +SELECT * FROM multi_extension.print_extension_changes(); + +-- Snapshot of state at 11.0-3 +ALTER EXTENSION citus UPDATE TO '11.0-3'; +SELECT * FROM multi_extension.print_extension_changes(); + +-- Test downgrade to 11.0-3 from 11.1-1 +ALTER EXTENSION citus UPDATE TO '11.1-1'; +ALTER EXTENSION citus UPDATE TO '11.0-3'; +-- Should be empty result since upgrade+downgrade should be a no-op SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 11.1-1 ALTER EXTENSION citus UPDATE TO '11.1-1'; SELECT * FROM multi_extension.print_extension_changes(); --- Test downgrade script (result should be empty) -ALTER EXTENSION citus UPDATE TO '11.0-2'; -ALTER EXTENSION citus UPDATE TO '11.1-1'; -SELECT * FROM multi_extension.print_extension_changes(); - DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql index eb4432726..90d40d586 100644 --- a/src/test/regress/sql/multi_utilities.sql +++ b/src/test/regress/sql/multi_utilities.sql @@ -170,13 +170,10 @@ WHERE tablename = 'dustbunnies_990002' ORDER BY attname; \c - - :master_host :master_port SET citus.log_remote_commands TO ON; --- verify warning for unqualified VACUUM -VACUUM; - -- check for multiple table vacuum VACUUM dustbunnies, second_dustbunnies; --- and warning when using targeted VACUUM without DDL propagation +-- and do not propagate when using targeted VACUUM without DDL propagation SET citus.enable_ddl_propagation to false; VACUUM dustbunnies; ANALYZE dustbunnies; @@ -198,3 +195,129 @@ SELECT worker_create_or_alter_role(NULL, 'create role dontcrash', NULL); -- confirm that citus_create_restore_point works SELECT 1 FROM citus_create_restore_point('regression-test'); + +SET citus.shard_count TO 1; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 970000; + +SET citus.log_remote_commands TO OFF; + +CREATE TABLE local_vacuum_table(id int primary key, b text); + +CREATE TABLE reference_vacuum_table(id int); +SELECT create_reference_table('reference_vacuum_table'); + +CREATE TABLE distributed_vacuum_table(id int); +SELECT create_distributed_table('distributed_vacuum_table', 'id'); + +SET citus.log_remote_commands TO ON; + +-- should propagate to all workers because no table is specified +VACUUM; + +-- should not propagate because no distributed table is specified +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +VACUUM local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + +-- vacuum full reclaims the space held by dead tuples and returns it to the operating system, whereas a plain vacuum only marks that space as reusable +VACUUM FULL local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + +-- should propagate to all workers because table is reference table +VACUUM reference_vacuum_table; + +-- should propagate to all workers because table is distributed table +VACUUM distributed_vacuum_table; + +-- only distributed_vacuum_table and reference_vacuum_table should propagate +VACUUM distributed_vacuum_table, local_vacuum_table, reference_vacuum_table; + +-- only reference_vacuum_table should propagate +VACUUM local_vacuum_table, reference_vacuum_table; + +-- vacuum (disable_page_skipping) aggressively processes all pages of the relation; it does not respect the visibility 
map +VACUUM (DISABLE_PAGE_SKIPPING true) local_vacuum_table; +VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; + +-- vacuum (index_cleanup on, parallel 1) should execute index vacuuming and index cleanup phases in parallel +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +SELECT pg_size_pretty( pg_total_relation_size('local_vacuum_table') ); + +-- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true) +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset + +insert into local_vacuum_table select i from generate_series(1,1000000) i; +delete from local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset + +SELECT :size1 > :size2 as truncate_less_size; + +-- vacuum (analyze) should be analyzing the table to generate statistics after vacuuming +select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table'; +vacuum (analyze) local_vacuum_table, reference_vacuum_table; + +-- give enough time for stats to be updated.(updated per 500ms by default) +select pg_sleep(1); + +select analyze_count from pg_stat_all_tables where relname = 'local_vacuum_table' or relname = 'reference_vacuum_table'; + +-- should not propagate because ddl propagation is disabled +SET citus.enable_ddl_propagation TO OFF; +VACUUM distributed_vacuum_table; +SET citus.enable_ddl_propagation TO ON; + +SET citus.log_remote_commands TO OFF; + +-- ANALYZE tests +CREATE TABLE local_analyze_table(id int); + +CREATE TABLE reference_analyze_table(id int); +SELECT create_reference_table('reference_analyze_table'); + +CREATE TABLE distributed_analyze_table(id int); +SELECT create_distributed_table('distributed_analyze_table', 'id'); + +CREATE TABLE loc (a INT, b INT); +CREATE TABLE dist (a INT); +SELECT create_distributed_table ('dist', 'a'); + +SET citus.log_remote_commands TO ON; + +-- should propagate to all workers because no table is specified +ANALYZE; + +-- should not propagate because no distributed table is specified +ANALYZE local_analyze_table; + +-- should propagate to all workers because table is reference table +ANALYZE reference_analyze_table; + +-- should propagate to all workers because table is distributed table +ANALYZE distributed_analyze_table; + +-- only distributed_analyze_table and reference_analyze_table should propagate +ANALYZE distributed_analyze_table, local_analyze_table, reference_analyze_table; + +-- only reference_analyze_table should propagate +ANALYZE local_analyze_table, reference_analyze_table; + +-- should not propagate because ddl propagation is disabled +SET citus.enable_ddl_propagation TO OFF; +ANALYZE distributed_analyze_table; +SET citus.enable_ddl_propagation TO ON; + +-- analyze only specified columns for corresponding tables +ANALYZE loc(b), dist(a); diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index 2c400efca..e40d52c10 100644 --- 
a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -12,7 +12,7 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 980000; SET citus.shard_count TO 2; --- test the new vacuum option, process_toast +-- test the new vacuum option, process_toast and also auto option for index_cleanup CREATE TABLE t1 (a int); SELECT create_distributed_table('t1','a'); SET citus.log_remote_commands TO ON; @@ -21,6 +21,34 @@ VACUUM (FULL, PROCESS_TOAST) t1; VACUUM (FULL, PROCESS_TOAST true) t1; VACUUM (FULL, PROCESS_TOAST false) t1; VACUUM (PROCESS_TOAST false) t1; +VACUUM (INDEX_CLEANUP AUTO) t1; +VACUUM (INDEX_CLEANUP) t1; +VACUUM (INDEX_CLEANUP AuTo) t1; +VACUUM (INDEX_CLEANUP false) t1; +VACUUM (INDEX_CLEANUP true) t1; +VACUUM (INDEX_CLEANUP "AUTOX") t1; +VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1; +VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1; + +-- vacuum (process_toast true) should be vacuuming toast tables (default is true) +CREATE TABLE local_vacuum_table(name text); +select reltoastrelid from pg_class where relname='local_vacuum_table' +\gset + +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; +SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + +-- vacuum (process_toast false) should not be vacuuming toast tables (default is true) +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; +SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + +DROP TABLE local_vacuum_table; SET citus.log_remote_commands TO OFF; create table dist(a int, b int); @@ -713,6 +741,30 @@ CREATE STATISTICS ctlt1_expr_stat ON (a || b) FROM ctlt1; CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL); SELECT create_distributed_table('ctlt1', 'a'); CREATE TABLE ctlt_all_2 (LIKE ctlt1 INCLUDING ALL); + +CREATE TABLE compression_and_defaults ( + data text COMPRESSION lz4 DEFAULT '"{}"'::text COLLATE "C" NOT NULL PRIMARY KEY, + rev text +) +WITH ( + autovacuum_vacuum_scale_factor='0.01', + fillfactor='75' +); + +SELECT create_distributed_table('compression_and_defaults', 'data', colocate_with:='none'); + +CREATE TABLE compression_and_generated_col ( + data text COMPRESSION lz4 GENERATED ALWAYS AS (rev || '{]') STORED COLLATE "C" NOT NULL, + rev text +) +WITH ( + autovacuum_vacuum_scale_factor='0.01', + fillfactor='75' +); +SELECT create_distributed_table('compression_and_generated_col', 'rev', colocate_with:='none'); + +DROP TABLE compression_and_defaults, compression_and_generated_col; + -- cleanup set client_min_messages to error; drop extension postgres_fdw cascade; diff --git a/src/test/regress/sql/worker_split_copy_test.sql b/src/test/regress/sql/worker_split_copy_test.sql new file mode 100644 index 000000000..324cc87c2 --- /dev/null +++ b/src/test/regress/sql/worker_split_copy_test.sql @@ -0,0 +1,67 @@ +CREATE SCHEMA worker_split_copy_test; +SET search_path TO worker_split_copy_test; +SET citus.shard_count TO 2; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 81070000; + +-- BEGIN: Create distributed table and insert data. 
+ +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table"(id int primary key, value char); +SELECT create_distributed_table('"test !/ \n _""dist_123_table"', 'id'); + +INSERT INTO "test !/ \n _""dist_123_table" (id, value) (SELECT g.id, 'N' FROM generate_series(1, 1000) AS g(id)); + +-- END: Create distributed table and insert data. + +-- BEGIN: Switch to Worker1, create target shards on the worker for local 2-way split copy. +\c - - - :worker_1_port +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070015"(id int primary key, value char); +CREATE TABLE worker_split_copy_test."test !/ \n _""dist_123_table_81070016"(id int primary key, value char); +-- END: Switch to Worker1, create target shards on the worker for local 2-way split copy. + +-- BEGIN: List row counts for the source shard and target shards in Worker1. +\c - - - :worker_1_port +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070000"; +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015"; +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016"; + +\c - - - :worker_2_port +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070001"; +-- END: List row counts for the source shard and target shards in Worker1. + +-- BEGIN: Set worker_1_node and worker_2_node +\c - - - :worker_1_port +SELECT nodeid AS worker_1_node FROM pg_dist_node WHERE nodeport=:worker_1_port \gset +SELECT nodeid AS worker_2_node FROM pg_dist_node WHERE nodeport=:worker_2_port \gset +-- END: Set worker_1_node and worker_2_node + +-- BEGIN: Trigger 2-way local shard split copy. +-- Ensure we will perform text copy. +SET citus.enable_binary_protocol = false; +SELECT * from worker_split_copy( + 81070000, -- source shard id to copy + ARRAY[ + -- split copy info for split child 1 + ROW(81070015, -- destination shard id + -2147483648, -- split range begin + -1073741824, --split range end + :worker_1_node)::citus.split_copy_info, + -- split copy info for split child 2 + ROW(81070016, --destination shard id + -1073741823, --split range begin + -1, --split range end + :worker_1_node)::citus.split_copy_info + ] + ); +-- END: Trigger 2-way local shard split copy. + +-- BEGIN: List updated row counts for the local target shards. +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070015"; +SELECT COUNT(*) FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070016"; +-- END: List updated row counts for the local target shards. + +-- BEGIN: CLEANUP. +\c - - - :master_port +SET client_min_messages TO WARNING; +DROP SCHEMA worker_split_copy_test CASCADE; +-- END: CLEANUP.
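Illustration only, not part of the patch: the two split_copy_info ranges above send rows whose hashed id falls in [-2147483648, -1073741824] to shard 81070015 and in [-1073741823, -1] to shard 81070016. Assuming the worker_hash() UDF that the Citus extension ships, the expected counts could be cross-checked against the source shard on worker 1 (before the cleanup step above), along these lines:

SELECT
    count(*) FILTER (WHERE worker_hash(id) BETWEEN -2147483648 AND -1073741824) AS expected_in_81070015,
    count(*) FILTER (WHERE worker_hash(id) BETWEEN -1073741823 AND -1)          AS expected_in_81070016
FROM worker_split_copy_test."test !/ \n _""dist_123_table_81070000";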