From 27ba19f7e10654bd29d5b7ec6befbc7e4e6a4244 Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Sun, 10 Oct 2021 20:03:20 +0200 Subject: [PATCH 1/2] Fix a flappy test in drop_column_partitioned_table --- src/test/regress/expected/drop_column_partitioned_table.out | 2 +- src/test/regress/sql/drop_column_partitioned_table.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/drop_column_partitioned_table.out b/src/test/regress/expected/drop_column_partitioned_table.out index c91fcd2bd..8a60cc854 100644 --- a/src/test/regress/expected/drop_column_partitioned_table.out +++ b/src/test/regress/expected/drop_column_partitioned_table.out @@ -146,7 +146,7 @@ WITH all_shardids AS (SELECT * FROM sensors_shardid UNION SELECT * FROM sensors_2000_shardid UNION SELECT * FROM sensors_2001_shardid UNION SELECT * FROM sensors_2002_shardid UNION SELECT * FROM sensors_2003_shardid UNION SELECT * FROM sensors_2004_shardid) -SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids); +SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids) ORDER BY 1,2,3,4; logicalrelid | shardid | shardminvalue | shardmaxvalue --------------------------------------------------------------------- sensors | 2580001 | -1073741824 | -1 diff --git a/src/test/regress/sql/drop_column_partitioned_table.sql b/src/test/regress/sql/drop_column_partitioned_table.sql index 4325d3db4..991c6e60a 100644 --- a/src/test/regress/sql/drop_column_partitioned_table.sql +++ b/src/test/regress/sql/drop_column_partitioned_table.sql @@ -98,7 +98,7 @@ WITH all_shardids AS (SELECT * FROM sensors_shardid UNION SELECT * FROM sensors_2000_shardid UNION SELECT * FROM sensors_2001_shardid UNION SELECT * FROM sensors_2002_shardid UNION SELECT * FROM sensors_2003_shardid UNION SELECT * FROM sensors_2004_shardid) -SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids); +SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids) ORDER BY 1,2,3,4; VACUUM ANALYZE sensors, sensors_2000, sensors_2001, sensors_2002, sensors_2003; From fba93df4b09d88c89fe87b92194de8b53dd2bcc9 Mon Sep 17 00:00:00 2001 From: Marco Slot Date: Thu, 21 Oct 2021 15:17:01 +0200 Subject: [PATCH 2/2] Remove copy into new append shard logic --- src/backend/distributed/commands/multi_copy.c | 437 ++------- src/include/distributed/commands/multi_copy.h | 6 + src/test/regress/.gitignore | 3 + src/test/regress/Makefile | 2 +- src/test/regress/expected/.gitignore | 2 - .../regress/expected/citus_local_tables.out | 2 +- .../citus_update_table_statistics.out | 40 +- .../drop_column_partitioned_table.out | 9 +- .../expected/isolation_append_copy_vs_all.out | 827 ------------------ .../isolation_dump_global_wait_edges_0.out | 104 +++ .../expected/isolation_range_copy_vs_all.out | 213 +---- .../expected/isolation_select_vs_all.out | 630 ++++++------- .../expected/isolation_truncate_vs_all.out | 142 +-- .../multi_alter_table_add_constraints.out | 17 +- .../regress/expected/multi_create_table.out | 12 + .../multi_null_minmax_value_pruning.out | 106 +-- .../multi_repartition_join_pruning.out | 66 +- .../regress/expected/multi_schema_support.out | 7 +- .../expected/non_colocated_join_order.out | 52 -- src/test/regress/expected/single_node.out | 13 + src/test/regress/expected/subquery_append.out | 21 +- 
.../regress/expected/upgrade_basic_after.out | 28 +- .../regress/expected/upgrade_basic_before.out | 14 +- .../upgrade_pg_dist_object_test_after.out | 2 +- .../input/multi_agg_type_conversion.source | 4 +- .../input/multi_alter_table_statements.source | 14 +- .../input/multi_append_table_to_shard.source | 6 +- src/test/regress/input/multi_copy.source | 122 +-- .../regress/input/multi_create_schema.source | 15 - src/test/regress/input/multi_load_data.source | 4 +- .../input/multi_load_large_records.source | 23 - .../regress/input/multi_load_more_data.source | 12 +- src/test/regress/isolation_schedule | 1 - src/test/regress/multi_1_schedule | 12 - src/test/regress/multi_schedule | 2 +- src/test/regress/multi_schedule_hyperscale | 3 - .../multi_schedule_hyperscale_superuser | 10 +- .../output/multi_agg_type_conversion.source | 3 +- .../multi_alter_table_statements.source | 482 +++++----- .../output/multi_append_table_to_shard.source | 8 +- src/test/regress/output/multi_copy.source | 731 ++++++++-------- .../regress/output/multi_create_schema.source | 20 - .../regress/output/multi_load_data.source | 4 +- .../output/multi_load_large_records.source | 26 - .../output/multi_load_more_data.source | 17 +- .../spec/isolation_append_copy_vs_all.spec | 118 --- .../spec/isolation_master_append_table.spec | 5 +- .../spec/isolation_range_copy_vs_all.spec | 17 +- .../regress/spec/isolation_select_vs_all.spec | 4 +- .../spec/isolation_truncate_vs_all.spec | 4 +- src/test/regress/sql/.gitignore | 2 - .../sql/citus_update_table_statistics.sql | 8 +- .../sql/drop_column_partitioned_table.sql | 9 +- .../sql/multi_alter_table_add_constraints.sql | 15 +- src/test/regress/sql/multi_create_table.sql | 2 + .../sql/multi_null_minmax_value_pruning.sql | 49 +- .../sql/multi_repartition_join_pruning.sql | 4 +- src/test/regress/sql/multi_schema_support.sql | 5 +- .../regress/sql/non_colocated_join_order.sql | 70 -- src/test/regress/sql/single_node.sql | 7 + src/test/regress/sql/subquery_append.sql | 11 +- src/test/regress/sql/upgrade_basic_after.sql | 12 +- src/test/regress/sql/upgrade_basic_before.sql | 12 +- 63 files changed, 1647 insertions(+), 2981 deletions(-) create mode 100644 src/test/regress/expected/isolation_dump_global_wait_edges_0.out delete mode 100644 src/test/regress/expected/non_colocated_join_order.out delete mode 100644 src/test/regress/input/multi_create_schema.source delete mode 100644 src/test/regress/input/multi_load_large_records.source delete mode 100644 src/test/regress/output/multi_create_schema.source delete mode 100644 src/test/regress/output/multi_load_large_records.source delete mode 100644 src/test/regress/spec/isolation_append_copy_vs_all.spec delete mode 100644 src/test/regress/sql/non_colocated_join_order.sql diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 7e99fcf9b..d683a2792 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -117,6 +117,9 @@ /* constant used in binary protocol */ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0"; +/* custom Citus option for appending to a shard */ +#define APPEND_TO_SHARD_OPTION "append_to_shard" + /* * Data size threshold to switch over the active placement for a connection. 
 * If this is too low, overhead of starting COPY commands will hurt the
@@ -239,11 +242,6 @@ typedef enum LocalCopyStatus
 /* Local functions forward declarations */
 static void CopyToExistingShards(CopyStmt *copyStatement,
 QueryCompletionCompat *completionTag);
-static void CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag,
- Oid relationId);
-static void OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
- ShardConnections *shardConnections,
- bool useBinaryCopyFormat);
 static List * RemoveOptionFromList(List *optionList, char *optionName);
 static bool BinaryOutputFunctionDefined(Oid typeId);
 static bool BinaryInputFunctionDefined(Oid typeId);
@@ -257,9 +255,6 @@ static void SendCopyDataToPlacement(StringInfo dataBuffer, int64 shardId,
 MultiConnection *connection);
 static void ReportCopyError(MultiConnection *connection, PGresult *result);
 static uint32 AvailableColumnCount(TupleDesc tupleDescriptor);
-static int64 StartCopyToNewShard(ShardConnections *shardConnections,
- CopyStmt *copyStatement, bool useBinaryCopyFormat);
-static int64 CreateEmptyShard(char *relationName);
 static Oid TypeForColumnName(Oid relationId, TupleDesc tupleDescriptor, char *columnName);
 static Oid * TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor);
@@ -332,6 +327,7 @@ static void RemovePlacementStateFromCopyConnectionStateBuffer(CopyConnectionStat
 connectionState,
 CopyPlacementState *
 placementState);
+static uint64 ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement);
 static uint64 ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues,
 bool *columnNulls);
@@ -403,14 +399,11 @@ CitusCopyFrom(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
 if (IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) ||
 IsCitusTableTypeCacheEntry(cacheEntry, RANGE_DISTRIBUTED) ||
+ IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED) ||
 IsCitusTableTypeCacheEntry(cacheEntry, CITUS_TABLE_WITH_NO_DIST_KEY))
 {
 CopyToExistingShards(copyStatement, completionTag);
 }
- else if (IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED))
- {
- CopyToNewShards(copyStatement, completionTag, relationId);
- }
 else
 {
 ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -508,6 +501,14 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList,
 partitionColumnIndex,
 executorState, NULL);
+
+ /* if the user specified an explicit append_to_shard option, write to it */
+ uint64 appendShardId = ProcessAppendToShardOption(tableId, copyStatement);
+ if (appendShardId != INVALID_SHARD_ID)
+ {
+ copyDest->appendShardId = appendShardId;
+ }
+
 DestReceiver *dest = (DestReceiver *) copyDest;
 dest->rStartup(dest, 0, tupleDescriptor);
@@ -609,196 +610,6 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
 }
-/*
- * CopyToNewShards implements the COPY table_name FROM ... for append-partitioned
- * tables where we create new shards into which to copy rows.
- */ -static void -CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, Oid - relationId) -{ - /* allocate column values and nulls arrays */ - Relation distributedRelation = table_open(relationId, RowExclusiveLock); - TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation); - uint32 columnCount = tupleDescriptor->natts; - Datum *columnValues = palloc0(columnCount * sizeof(Datum)); - bool *columnNulls = palloc0(columnCount * sizeof(bool)); - - EState *executorState = CreateExecutorState(); - MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState); - ExprContext *executorExpressionContext = GetPerTupleExprContext(executorState); - - const char *delimiterCharacter = "\t"; - const char *nullPrintCharacter = "\\N"; - - ErrorContextCallback errorCallback; - - int64 currentShardId = INVALID_SHARD_ID; - uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; - uint64 copiedDataSizeInBytes = 0; - uint64 processedRowCount = 0; - - ShardConnections *shardConnections = - (ShardConnections *) palloc0(sizeof(ShardConnections)); - - /* initialize copy state to read from COPY data source */ - CopyFromState copyState = BeginCopyFrom_compat(NULL, - distributedRelation, - NULL, - copyStatement->filename, - copyStatement->is_program, - NULL, - copyStatement->attlist, - copyStatement->options); - - CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); - copyOutState->delim = (char *) delimiterCharacter; - copyOutState->null_print = (char *) nullPrintCharacter; - copyOutState->null_print_client = (char *) nullPrintCharacter; - copyOutState->binary = CanUseBinaryCopyFormat(tupleDescriptor); - copyOutState->fe_msgbuf = makeStringInfo(); - copyOutState->rowcontext = executorTupleContext; - - FmgrInfo *columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor, - copyOutState->binary); - - /* set up callback to identify error line number */ - errorCallback.callback = CopyFromErrorCallback; - errorCallback.arg = (void *) copyState; - errorCallback.previous = error_context_stack; - - /* - * From here on we use copyStatement as the template for the command - * that we send to workers. This command does not have an attribute - * list since NextCopyFrom will generate a value for all columns. - * We also strip options. 
- */ - copyStatement->attlist = NIL; - copyStatement->options = NIL; - - if (copyOutState->binary) - { - DefElem *binaryFormatOption = - makeDefElem("format", (Node *) makeString("binary"), -1); - - copyStatement->options = lappend(copyStatement->options, binaryFormatOption); - } - - while (true) - { - ResetPerTupleExprContext(executorState); - - /* switch to tuple memory context and start showing line number in errors */ - error_context_stack = &errorCallback; - MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext); - - /* parse a row from the input */ - bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext, - columnValues, columnNulls); - - if (!nextRowFound) - { - /* switch to regular memory context and stop showing line number in errors */ - MemoryContextSwitchTo(oldContext); - error_context_stack = errorCallback.previous; - break; - } - - CHECK_FOR_INTERRUPTS(); - - /* switch to regular memory context and stop showing line number in errors */ - MemoryContextSwitchTo(oldContext); - error_context_stack = errorCallback.previous; - - /* - * If copied data size is zero, this means either this is the first - * line in the copy or we just filled the previous shard up to its - * capacity. Either way, we need to create a new shard and - * start copying new rows into it. - */ - if (copiedDataSizeInBytes == 0) - { - /* create shard and open connections to shard placements */ - currentShardId = StartCopyToNewShard(shardConnections, copyStatement, - copyOutState->binary); - - /* send copy binary headers to shard placements */ - if (copyOutState->binary) - { - SendCopyBinaryHeaders(copyOutState, currentShardId, - shardConnections->connectionList); - } - } - - /* replicate row to shard placements */ - resetStringInfo(copyOutState->fe_msgbuf); - AppendCopyRowData(columnValues, columnNulls, tupleDescriptor, - copyOutState, columnOutputFunctions, NULL); - SendCopyDataToAll(copyOutState->fe_msgbuf, currentShardId, - shardConnections->connectionList); - - uint64 messageBufferSize = copyOutState->fe_msgbuf->len; - copiedDataSizeInBytes = copiedDataSizeInBytes + messageBufferSize; - - /* - * If we filled up this shard to its capacity, send copy binary footers - * to shard placements, and update shard statistics. - */ - if (copiedDataSizeInBytes > shardMaxSizeInBytes) - { - Assert(currentShardId != INVALID_SHARD_ID); - - if (copyOutState->binary) - { - SendCopyBinaryFooters(copyOutState, currentShardId, - shardConnections->connectionList); - } - - EndRemoteCopy(currentShardId, shardConnections->connectionList); - UpdateShardStatistics(shardConnections->shardId); - - copiedDataSizeInBytes = 0; - currentShardId = INVALID_SHARD_ID; - } - - processedRowCount += 1; - -#if PG_VERSION_NUM >= PG_VERSION_14 - pgstat_progress_update_param(PROGRESS_COPY_TUPLES_PROCESSED, processedRowCount); -#endif - } - - /* - * For the last shard, send copy binary footers to shard placements, - * and update shard statistics. If no row is send, there is no shard - * to finalize the copy command. 
- */ - if (copiedDataSizeInBytes > 0) - { - Assert(currentShardId != INVALID_SHARD_ID); - - if (copyOutState->binary) - { - SendCopyBinaryFooters(copyOutState, currentShardId, - shardConnections->connectionList); - } - EndRemoteCopy(currentShardId, shardConnections->connectionList); - UpdateShardStatistics(shardConnections->shardId); - } - - EndCopyFrom(copyState); - table_close(distributedRelation, NoLock); - - /* check for cancellation one last time before returning */ - CHECK_FOR_INTERRUPTS(); - - if (completionTag != NULL) - { - CompleteCopyQueryTagCompat(completionTag, processedRowCount); - } -} - - static void CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount) { @@ -839,105 +650,6 @@ RemoveOptionFromList(List *optionList, char *optionName) } -/* - * OpenCopyConnectionsForNewShards opens a connection for each placement of a shard and - * starts a COPY transaction if necessary. If a connection cannot be opened, - * then the transaction is rollbacked. - */ -static void -OpenCopyConnectionsForNewShards(CopyStmt *copyStatement, - ShardConnections *shardConnections, - bool useBinaryCopyFormat) -{ - int failedPlacementCount = 0; - ListCell *placementCell = NULL; - List *connectionList = NULL; - int64 shardId = shardConnections->shardId; - bool raiseInterrupts = true; - MemoryContext localContext = - AllocSetContextCreateExtended(CurrentMemoryContext, - "OpenCopyConnectionsForNewShards", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); - - - /* release active placement list at the end of this function */ - MemoryContext oldContext = MemoryContextSwitchTo(localContext); - - List *activePlacementList = ActiveShardPlacementList(shardId); - - MemoryContextSwitchTo(oldContext); - - foreach(placementCell, activePlacementList) - { - ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); - char *nodeUser = CurrentUserName(); - uint32 connectionFlags = FOR_DML; - - /* - * For hash partitioned tables, connection establishment happens in - * CopyGetPlacementConnection(). - */ - Assert(placement->partitionMethod != DISTRIBUTE_BY_HASH); - - MultiConnection *connection = GetPlacementConnection(connectionFlags, placement, - nodeUser); - - /* - * This code-path doesn't support optional connections, so we don't expect - * NULL connections. - */ - Assert(connection != NULL); - - if (PQstatus(connection->pgConn) != CONNECTION_OK) - { - ReportConnectionError(connection, ERROR); - } - - /* - * Errors are supposed to cause immediate aborts (i.e. we don't - * want to/can't invalidate placements), mark the connection as - * critical so later errors cause failures. 
- */ - MarkRemoteTransactionCritical(connection); - ClaimConnectionExclusively(connection); - RemoteTransactionBeginIfNecessary(connection); - - StringInfo copyCommand = ConstructCopyStatement(copyStatement, - shardConnections->shardId); - - if (!SendRemoteCommand(connection, copyCommand->data)) - { - ReportConnectionError(connection, ERROR); - } - PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); - if (PQresultStatus(result) != PGRES_COPY_IN) - { - ReportResultError(connection, result, ERROR); - } - PQclear(result); - connectionList = lappend(connectionList, connection); - } - - /* if all placements failed, error out */ - if (failedPlacementCount == list_length(activePlacementList)) - { - ereport(ERROR, (errmsg("could not connect to any active placements"))); - } - - /* - * We should just error out and code execution should - * never reach to this point. This is the case for all tables. - */ - Assert(failedPlacementCount == 0); - - shardConnections->connectionList = connectionList; - - MemoryContextReset(localContext); -} - - /* * CanUseBinaryCopyFormat iterates over columns of the relation and looks for a * column whose type is array of user-defined type or composite type. If it finds @@ -1830,48 +1542,6 @@ AppendCopyBinaryFooters(CopyOutState footerOutputState) } -/* - * StartCopyToNewShard creates a new shard and related shard placements and - * opens connections to shard placements. - */ -static int64 -StartCopyToNewShard(ShardConnections *shardConnections, CopyStmt *copyStatement, - bool useBinaryCopyFormat) -{ - char *relationName = copyStatement->relation->relname; - char *schemaName = copyStatement->relation->schemaname; - char *qualifiedName = quote_qualified_identifier(schemaName, relationName); - int64 shardId = CreateEmptyShard(qualifiedName); - - shardConnections->shardId = shardId; - - shardConnections->connectionList = NIL; - - /* connect to shards placements and start transactions */ - OpenCopyConnectionsForNewShards(copyStatement, shardConnections, - useBinaryCopyFormat); - - return shardId; -} - - -/* - * CreateEmptyShard creates a new shard and related shard placements from the - * local master node. 
- */ -static int64 -CreateEmptyShard(char *relationName) -{ - text *relationNameText = cstring_to_text(relationName); - Datum relationNameDatum = PointerGetDatum(relationNameText); - Datum shardIdDatum = DirectFunctionCall1(master_create_empty_shard, - relationNameDatum); - int64 shardId = DatumGetInt64(shardIdDatum); - - return shardId; -} - - /* *INDENT-OFF* */ @@ -2283,14 +1953,17 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, } /* error if any shard missing min/max values */ - if (IsCitusTableTypeCacheEntry(cacheEntry, DISTRIBUTED_TABLE) && - cacheEntry->hasUninitializedShardInterval) + if (cacheEntry->hasUninitializedShardInterval) { - ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not start copy"), - errdetail("Distributed relation \"%s\" has shards " - "with missing shardminvalue/shardmaxvalue.", - relationName))); + if (IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) || + IsCitusTableTypeCacheEntry(cacheEntry, RANGE_DISTRIBUTED)) + { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not start copy"), + errdetail("Distributed relation \"%s\" has shards " + "with missing shardminvalue/shardmaxvalue.", + relationName))); + } } /* prevent concurrent placement changes and non-commutative DML statements */ @@ -2670,6 +2343,58 @@ RemovePlacementStateFromCopyConnectionStateBuffer(CopyConnectionState *connectio } +/* + * ProcessAppendToShardOption returns the value of append_to_shard if set, + * and removes the option from the options list. + */ +static uint64 +ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement) +{ + uint64 appendShardId = INVALID_SHARD_ID; + bool appendToShardSet = false; + + DefElem *defel = NULL; + foreach_ptr(defel, copyStatement->options) + { + if (strncmp(defel->defname, APPEND_TO_SHARD_OPTION, NAMEDATALEN) == 0) + { + appendShardId = defGetInt64(defel); + appendToShardSet = true; + break; + } + } + + if (appendToShardSet) + { + if (!IsCitusTableType(relationId, APPEND_DISTRIBUTED)) + { + ereport(ERROR, (errmsg(APPEND_TO_SHARD_OPTION " is only valid for " + "append-distributed tables"))); + } + + /* throws an error if shard does not exist */ + ShardInterval *shardInterval = LoadShardInterval(appendShardId); + + /* also check whether shard belongs to table */ + if (shardInterval->relationId != relationId) + { + ereport(ERROR, (errmsg("shard " UINT64_FORMAT " does not belong to table %s", + appendShardId, get_rel_name(relationId)))); + } + + copyStatement->options = + RemoveOptionFromList(copyStatement->options, APPEND_TO_SHARD_OPTION); + } + else if (IsCitusTableType(relationId, APPEND_DISTRIBUTED)) + { + ereport(ERROR, (errmsg("COPY into append-distributed table requires using the " + APPEND_TO_SHARD_OPTION " option"))); + } + + return appendShardId; +} + + /* * ContainsLocalPlacement returns true if the current node has * a local placement for the given shard id. 
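A minimal sketch of the workflow that ProcessAppendToShardOption enables, mirroring the updated regression tests elsewhere in this patch (the table name append_events and the echoed rows are illustrative assumptions, not part of the patch): the caller first creates an empty shard explicitly, then targets it through the new append_to_shard COPY option.

    -- hypothetical append-distributed table, for illustration only
    CREATE TABLE append_events (id int);
    SELECT create_distributed_table('append_events', 'id', 'append');
    SELECT master_create_empty_shard('append_events') AS shardid \gset
    COPY append_events FROM PROGRAM 'echo 1 && echo 2' WITH (format 'csv', append_to_shard :shardid);
    -- a COPY without the option now errors out:
    -- ERROR: COPY into append-distributed table requires using the append_to_shard option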
@@ -2703,6 +2428,13 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu int partitionColumnIndex = copyDest->partitionColumnIndex; Datum partitionColumnValue = 0; CopyCoercionData *columnCoercionPaths = copyDest->columnCoercionPaths; + CitusTableCacheEntry *cacheEntry = + GetCitusTableCacheEntry(copyDest->distributedRelationId); + + if (IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED)) + { + return copyDest->appendShardId; + } /* * Find the partition column value and corresponding shard interval @@ -2743,8 +2475,6 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu * For reference table, this function blindly returns the tables single * shard. */ - CitusTableCacheEntry *cacheEntry = - GetCitusTableCacheEntry(copyDest->distributedRelationId); ShardInterval *shardInterval = FindShardInterval(partitionColumnValue, cacheEntry); if (shardInterval == NULL) { @@ -3230,7 +2960,8 @@ CitusCopyTo(CopyStmt *copyStatement, QueryCompletionCompat *completionTag) if (shardIntervalCell == list_head(shardIntervalList)) { /* remove header after the first shard */ - RemoveOptionFromList(copyStatement->options, "header"); + copyStatement->options = + RemoveOptionFromList(copyStatement->options, "header"); } } diff --git a/src/include/distributed/commands/multi_copy.h b/src/include/distributed/commands/multi_copy.h index 105c05b98..f7a50644d 100644 --- a/src/include/distributed/commands/multi_copy.h +++ b/src/include/distributed/commands/multi_copy.h @@ -140,6 +140,12 @@ typedef struct CitusCopyDestReceiver * files as if they are shards. */ char *colocatedIntermediateResultIdPrefix; + + /* + * When copying into append-partitioned tables, the destination shard is chosen + * upfront. + */ + uint64 appendShardId; } CitusCopyDestReceiver; diff --git a/src/test/regress/.gitignore b/src/test/regress/.gitignore index 0458dc3ad..8bbe973b4 100644 --- a/src/test/regress/.gitignore +++ b/src/test/regress/.gitignore @@ -25,3 +25,6 @@ # python *.pyc + +# core dumps +core diff --git a/src/test/regress/Makefile b/src/test/regress/Makefile index 5187c05d4..6636a082a 100644 --- a/src/test/regress/Makefile +++ b/src/test/regress/Makefile @@ -43,7 +43,7 @@ output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $ # intermediate, for muscle memory backward compatibility. 
check: check-full # check-full triggers all tests that ought to be run routinely -check-full: check-multi check-multi-mx check-worker check-operations check-follower-cluster check-failure +check-full: check-multi check-multi-mx check-multi-1 check-worker check-operations check-follower-cluster check-isolation check-failure ISOLATION_DEPDIR=.deps/isolation diff --git a/src/test/regress/expected/.gitignore b/src/test/regress/expected/.gitignore index 5ba403db7..b42f7fe1b 100644 --- a/src/test/regress/expected/.gitignore +++ b/src/test/regress/expected/.gitignore @@ -10,10 +10,8 @@ /multi_behavioral_analytics_create_table_superuser.out /multi_complex_count_distinct.out /multi_copy.out -/multi_create_schema.out /multi_load_data.out /multi_load_data_superuser.out -/multi_load_large_records.out /multi_load_more_data.out /multi_mx_copy_data.out /multi_outer_join.out diff --git a/src/test/regress/expected/citus_local_tables.out b/src/test/regress/expected/citus_local_tables.out index 871425744..207f51db7 100644 --- a/src/test/regress/expected/citus_local_tables.out +++ b/src/test/regress/expected/citus_local_tables.out @@ -401,7 +401,7 @@ SELECT citus_add_local_table_to_metadata('"LocalTabLE.1!?!9012345678901234567890 -- create some objects after citus_add_local_table_to_metadata CREATE INDEX "my!Index2" ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id) WITH ( fillfactor = 90 ) WHERE id < 20; NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456" -NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' )WHERE (id < 20) +NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' ) WHERE (id < 20) CREATE UNIQUE INDEX uniqueIndex2 ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id); NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456" NOTICE: executing the command locally: CREATE UNIQUE INDEX uniqueindex2_1504022 ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) diff --git a/src/test/regress/expected/citus_update_table_statistics.out b/src/test/regress/expected/citus_update_table_statistics.out index 617296c21..d512369d3 100644 --- a/src/test/regress/expected/citus_update_table_statistics.out +++ b/src/test/regress/expected/citus_update_table_statistics.out @@ -22,10 +22,10 @@ SELECT create_distributed_table('test_table_statistics_hash', 'id'); INSERT INTO test_table_statistics_hash SELECT i FROM generate_series(0, 10000)i; -- originally shardlength (size of the shard) is zero SELECT - ds.logicalrelid::regclass::text AS tablename, - ds.shardid AS shardid, + ds.logicalrelid::regclass::text AS tablename, + ds.shardid AS shardid, dsp.placementid AS placementid, - shard_name(ds.logicalrelid, ds.shardid) AS shardname, + shard_name(ds.logicalrelid, ds.shardid) AS shardname, ds.shardminvalue AS shardminvalue, ds.shardmaxvalue AS shardmaxvalue FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid) @@ -82,10 +82,10 @@ DETAIL: 
on server postgres@localhost:xxxxx connectionId: xxxxxxx RESET citus.log_remote_commands; RESET citus.multi_shard_modify_mode; SELECT - ds.logicalrelid::regclass::text AS tablename, - ds.shardid AS shardid, + ds.logicalrelid::regclass::text AS tablename, + ds.shardid AS shardid, dsp.placementid AS placementid, - shard_name(ds.logicalrelid, ds.shardid) AS shardname, + shard_name(ds.logicalrelid, ds.shardid) AS shardname, ds.shardminvalue as shardminvalue, ds.shardmaxvalue as shardmaxvalue FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid) @@ -120,14 +120,16 @@ SELECT create_distributed_table('test_table_statistics_append', 'id', 'append'); (1 row) -COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH CSV; -COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH CSV; --- originally shardminvalue and shardmaxvalue will be 0,3 and 4, 7 +SELECT master_create_empty_shard('test_table_statistics_append') AS shardid1 \gset +SELECT master_create_empty_shard('test_table_statistics_append') AS shardid2 \gset +COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH (format 'csv', append_to_shard :shardid1); +COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH (format 'csv', append_to_shard :shardid2); +-- shardminvalue and shardmaxvalue are NULL SELECT - ds.logicalrelid::regclass::text AS tablename, - ds.shardid AS shardid, + ds.logicalrelid::regclass::text AS tablename, + ds.shardid AS shardid, dsp.placementid AS placementid, - shard_name(ds.logicalrelid, ds.shardid) AS shardname, + shard_name(ds.logicalrelid, ds.shardid) AS shardname, ds.shardminvalue as shardminvalue, ds.shardmaxvalue as shardmaxvalue FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid) @@ -135,10 +137,10 @@ WHERE ds.logicalrelid::regclass::text in ('test_table_statistics_append') ORDER BY 2, 3; tablename | shardid | placementid | shardname | shardminvalue | shardmaxvalue --------------------------------------------------------------------- - test_table_statistics_append | 981008 | 982016 | test_table_statistics_append_981008 | 0 | 3 - test_table_statistics_append | 981008 | 982017 | test_table_statistics_append_981008 | 0 | 3 - test_table_statistics_append | 981009 | 982018 | test_table_statistics_append_981009 | 4 | 7 - test_table_statistics_append | 981009 | 982019 | test_table_statistics_append_981009 | 4 | 7 + test_table_statistics_append | 981008 | 982016 | test_table_statistics_append_981008 | | + test_table_statistics_append | 981008 | 982017 | test_table_statistics_append_981008 | | + test_table_statistics_append | 981009 | 982018 | test_table_statistics_append_981009 | | + test_table_statistics_append | 981009 | 982019 | test_table_statistics_append_981009 | | (4 rows) -- delete some data to change shardminvalues of a shards @@ -168,10 +170,10 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx RESET citus.log_remote_commands; RESET citus.multi_shard_modify_mode; SELECT - ds.logicalrelid::regclass::text AS tablename, - ds.shardid AS shardid, + ds.logicalrelid::regclass::text AS tablename, + ds.shardid AS shardid, dsp.placementid AS placementid, - shard_name(ds.logicalrelid, ds.shardid) AS shardname, + shard_name(ds.logicalrelid, ds.shardid) AS shardname, ds.shardminvalue as shardminvalue, ds.shardmaxvalue as shardmaxvalue FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid) diff --git 
a/src/test/regress/expected/drop_column_partitioned_table.out b/src/test/regress/expected/drop_column_partitioned_table.out index 8a60cc854..7151071e9 100644 --- a/src/test/regress/expected/drop_column_partitioned_table.out +++ b/src/test/regress/expected/drop_column_partitioned_table.out @@ -124,7 +124,8 @@ FROM WHERE logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass, 'sensors_2001'::regclass, 'sensors_2002'::regclass, - 'sensors_2003'::regclass, 'sensors_2004'::regclass); + 'sensors_2003'::regclass, 'sensors_2004'::regclass) +ORDER BY 1,2; logicalrelid | column_to_column_name --------------------------------------------------------------------- sensors | measureid @@ -357,7 +358,8 @@ FROM WHERE logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass, 'sensors_2001'::regclass, 'sensors_2002'::regclass, - 'sensors_2003'::regclass, 'sensors_2004'::regclass); + 'sensors_2003'::regclass, 'sensors_2004'::regclass) +ORDER BY 1,2; logicalrelid | column_to_column_name --------------------------------------------------------------------- sensors | measureid @@ -377,7 +379,8 @@ FROM WHERE logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass, 'sensors_2001'::regclass, 'sensors_2002'::regclass, - 'sensors_2003'::regclass, 'sensors_2004'::regclass); + 'sensors_2003'::regclass, 'sensors_2004'::regclass) +ORDER BY 1,2; logicalrelid | column_to_column_name --------------------------------------------------------------------- sensors | measureid diff --git a/src/test/regress/expected/isolation_append_copy_vs_all.out b/src/test/regress/expected/isolation_append_copy_vs_all.out index 4176289b4..e69de29bb 100644 --- a/src/test/regress/expected/isolation_append_copy_vs_all.out +++ b/src/test/regress/expected/isolation_append_copy_vs_all.out @@ -1,827 +0,0 @@ -Parsed test spec with 2 sessions - -starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 15 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-router-select: SELECT * FROM append_copy WHERE id = 1; -id|data|int_data ---------------------------------------------------------------------- - 1| b | 1 -(1 row) - -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy 
s2-real-time-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; -id|data|int_data ---------------------------------------------------------------------- - 0| a | 0 - 1| b | 1 - 2| c | 2 - 3| d | 3 - 4| e | 4 -(5 rows) - -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-adaptive-select: - SET citus.enable_repartition_joins TO ON; - SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; - -id|data|int_data|id|data|int_data ---------------------------------------------------------------------- - 0| a | 0| 0| a | 0 - 1| b | 1| 1| b | 1 - 2| c | 2| 2| c | 2 - 3| d | 3| 3| d | 3 - 4| e | 4| 4| e | 4 -(5 rows) - -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-insert: INSERT INTO append_copy VALUES(0, 'k', 0); -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 11 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 15 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count -create_distributed_table 
---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-update: UPDATE append_copy SET data = 'l' WHERE id = 0; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-delete: DELETE FROM append_copy WHERE id = 1; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 9 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-truncate: TRUNCATE append_copy; -step s1-commit: COMMIT; -step s2-truncate: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 0 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-drop: DROP TABLE append_copy; -step s1-commit: COMMIT; -step s2-drop: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -ERROR: relation "append_copy" does not exist - -starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); -step s1-commit: COMMIT; -step s2-ddl-create-index: <... 
completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,2) -(localhost,57638,t,2) -(2 rows) - - -starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-ddl-drop-index: DROP INDEX append_copy_index; -step s1-commit: COMMIT; -step s2-ddl-drop-index: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,0) -(localhost,57638,t,0) -(2 rows) - - -starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); -step s1-commit: COMMIT; -step s2-ddl-create-index-concurrently: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,1) -(localhost,57638,t,1) -(2 rows) - - -starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; -step s1-commit: COMMIT; -step s2-ddl-add-column: <... 
completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) -(2 rows) - - -starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; -step s1-begin: BEGIN; -step s1-copy-additional-column: COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; -step s2-ddl-drop-column: ALTER TABLE append_copy DROP new_column; -step s1-commit: COMMIT; -step s2-ddl-drop-column: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") -(2 rows) - - -starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; -step s1-commit: COMMIT; -step s2-ddl-rename-column: <... 
completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) -(2 rows) - - -starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-table-size: SELECT citus_total_relation_size('append_copy'); -citus_total_relation_size ---------------------------------------------------------------------- - 32768 -(1 row) - -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); -step s1-commit: COMMIT; -step s2-master-drop-all-shards: <... completed> -citus_drop_all_shards ---------------------------------------------------------------------- - 2 -(1 row) - -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 0 -(1 row) - - -starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-drop: DROP TABLE append_copy; -step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int); -step s1-begin: BEGIN; -step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append'); -step s1-commit: COMMIT; -step s2-distribute-table: <... 
completed> -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 0 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-router-select: SELECT * FROM append_copy WHERE id = 1; -id|data|int_data ---------------------------------------------------------------------- - 1| b | 1 -(1 row) - -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; -id|data|int_data ---------------------------------------------------------------------- - 0| a | 0 - 1| b | 1 - 2| c | 2 - 3| d | 3 - 4| e | 4 -(5 rows) - -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-adaptive-select: - SET citus.enable_repartition_joins TO ON; - SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; - -id|data|int_data|id|data|int_data ---------------------------------------------------------------------- - 0| a | 0| 0| a | 0 - 1| b | 1| 1| b | 1 - 2| c | 2| 2| c | 2 - 3| d | 3| 3| d | 3 - 4| e | 4| 4| e | 4 -(5 rows) - -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-insert: INSERT INTO append_copy VALUES(0, 'k', 0); -step 
s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 11 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 15 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-update: UPDATE append_copy SET data = 'l' WHERE id = 0; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-delete: DELETE FROM append_copy WHERE id = 1; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 9 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-truncate: TRUNCATE append_copy; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... 
completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 5 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-drop: DROP TABLE append_copy; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... completed> -ERROR: relation "append_copy" does not exist -step s1-select-count: SELECT COUNT(*) FROM append_copy; -ERROR: relation "append_copy" does not exist - -starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,2) -(localhost,57638,t,2) -(2 rows) - - -starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); -step s1-begin: BEGIN; -step s1-ddl-drop-index: DROP INDEX append_copy_index; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... 
completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,0) -(localhost,57638,t,0) -(2 rows) - - -starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... completed> -ERROR: missing data for column "new_column" -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 5 -(1 row) - -step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) -(2 rows) - - -starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; -step s1-begin: BEGIN; -step s1-ddl-drop-column: ALTER TABLE append_copy DROP new_column; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... 
completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,"") -(localhost,57638,t,"") -(2 rows) - - -starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - -step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); -run_command_on_workers ---------------------------------------------------------------------- -(localhost,57637,t,new_column) -(localhost,57638,t,new_column) -(2 rows) - - -starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-table-size: SELECT citus_total_relation_size('append_copy'); -citus_total_relation_size ---------------------------------------------------------------------- - 32768 -(1 row) - -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 10 -(1 row) - - -starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; -step s1-begin: BEGIN; -step s1-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); -citus_drop_all_shards ---------------------------------------------------------------------- - 1 -(1 row) - -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... 
completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 5 -(1 row) - - -starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s1-drop: DROP TABLE append_copy; -step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int); -step s1-begin: BEGIN; -step s1-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append'); -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s1-commit: COMMIT; -step s2-copy: <... completed> -step s1-select-count: SELECT COUNT(*) FROM append_copy; -count ---------------------------------------------------------------------- - 5 -(1 row) - diff --git a/src/test/regress/expected/isolation_dump_global_wait_edges_0.out b/src/test/regress/expected/isolation_dump_global_wait_edges_0.out new file mode 100644 index 000000000..e81f56241 --- /dev/null +++ b/src/test/regress/expected/isolation_dump_global_wait_edges_0.out @@ -0,0 +1,104 @@ +Parsed test spec with 4 sessions + +starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s1-update: + UPDATE distributed_table SET y = 1 WHERE x = 1; + +step s2-update: + UPDATE distributed_table SET y = 2 WHERE x = 1; + +step detector-dump-wait-edges: + SELECT + waiting_transaction_num, + blocking_transaction_num, + blocking_transaction_waiting + FROM + dump_global_wait_edges() + ORDER BY + waiting_transaction_num, + blocking_transaction_num, + blocking_transaction_waiting; + SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; + +waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting +--------------------------------------------------------------------- + 406| 405|f +(1 row) + +transactionnumber|waitingtransactionnumbers +--------------------------------------------------------------------- + 405| + 406| 405 +(2 rows) + +step s1-abort: + ABORT; + +step s2-update: <... 
completed> +step s2-abort: + ABORT; + + +starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort +step s1-begin: + BEGIN; + +step s2-begin: + BEGIN; + +step s3-begin: + BEGIN; + +step s1-update: + UPDATE distributed_table SET y = 1 WHERE x = 1; + +step s2-update: + UPDATE distributed_table SET y = 2 WHERE x = 1; + +step s3-update: + UPDATE distributed_table SET y = 3 WHERE x = 1; + +step detector-dump-wait-edges: + SELECT + waiting_transaction_num, + blocking_transaction_num, + blocking_transaction_waiting + FROM + dump_global_wait_edges() + ORDER BY + waiting_transaction_num, + blocking_transaction_num, + blocking_transaction_waiting; + SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; + +waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting +--------------------------------------------------------------------- + 410| 409|f + 411| 409|f + 411| 410|t +(3 rows) + +transactionnumber|waitingtransactionnumbers +--------------------------------------------------------------------- + 409| + 410|409 + 411|409,410 +(3 rows) + +step s1-abort: + ABORT; + +step s2-update: <... completed> +step s2-abort: + ABORT; + +step s3-update: <... completed> +step s3-abort: + ABORT; + diff --git a/src/test/regress/expected/isolation_range_copy_vs_all.out b/src/test/regress/expected/isolation_range_copy_vs_all.out index d66a63cf6..f5722c84c 100644 --- a/src/test/regress/expected/isolation_range_copy_vs_all.out +++ b/src/test/regress/expected/isolation_range_copy_vs_all.out @@ -1,11 +1,6 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -19,11 +14,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -42,11 +32,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -69,11 +54,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: 
BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -99,11 +79,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -117,11 +92,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -135,11 +105,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -153,11 +118,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -171,11 +131,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -190,11 +145,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -205,11 +155,6 @@ step s1-select-count: SELECT COUNT(*) FROM range_copy; ERROR: relation "range_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes -create_distributed_table 
---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -225,17 +170,12 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,2) -(localhost,57638,t,2) +(localhost,57637,t,1) +(localhost,57638,t,1) (2 rows) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-begin: BEGIN; @@ -258,11 +198,6 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -284,11 +219,6 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -310,11 +240,6 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -337,11 +262,6 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -363,11 +283,6 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-copy s2-table-size 
s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -386,11 +301,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -404,11 +314,6 @@ count starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -428,16 +333,17 @@ count starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-drop: DROP TABLE range_copy; step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; -step s2-distribute-table: SELECT create_distributed_table('range_copy', 'id', 'range'); +step s2-distribute-table: + SET citus.shard_replication_factor TO 1; + SET citus.next_shard_id TO 3004005; + SELECT create_distributed_table('range_copy', 'id', 'range'); + UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005; + UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006; + step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table @@ -453,11 +359,6 @@ count starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM range_copy WHERE id = 1; @@ -476,11 +377,6 @@ count starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2; @@ -503,11 +399,6 @@ count starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-adaptive-select: @@ -533,11 +424,6 @@ count starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO range_copy VALUES(0, 'k', 0); @@ -551,11 +437,6 @@ count starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO range_copy SELECT * FROM range_copy; @@ -569,11 +450,6 @@ count starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE range_copy SET data = 'l' WHERE id = 0; @@ -587,11 +463,6 @@ count starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM range_copy WHERE id = 1; @@ -605,11 +476,6 @@ count starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: 
BEGIN; step s1-truncate: TRUNCATE range_copy; @@ -624,11 +490,6 @@ count starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE range_copy; @@ -640,11 +501,6 @@ step s1-select-count: SELECT COUNT(*) FROM range_copy; ERROR: relation "range_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); @@ -660,17 +516,12 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,2) -(localhost,57638,t,2) +(localhost,57637,t,1) +(localhost,57638,t,1) (2 rows) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-begin: BEGIN; @@ -693,11 +544,6 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; @@ -720,11 +566,6 @@ run_command_on_workers starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; @@ -747,11 +588,6 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column; @@ -773,17 +609,12 @@ run_command_on_workers starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count 
-create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('range_copy'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 24576 (1 row) step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; @@ -796,11 +627,6 @@ count starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM range_copy; @@ -814,35 +640,26 @@ count starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); citus_drop_all_shards --------------------------------------------------------------------- - 1 + 2 (1 row) step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> +ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM range_copy; count --------------------------------------------------------------------- - 5 + 0 (1 row) starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count -create_distributed_table ---------------------------------------------------------------------- - -(1 row) - step s1-drop: DROP TABLE range_copy; step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int); step s1-begin: BEGIN; diff --git a/src/test/regress/expected/isolation_select_vs_all.out b/src/test/regress/expected/isolation_select_vs_all.out index 734ff7a5f..06f8dc332 100644 --- a/src/test/regress/expected/isolation_select_vs_all.out +++ b/src/test/regress/expected/isolation_select_vs_all.out @@ -1,12 +1,12 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-router-select s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -34,12 +34,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -71,12 +71,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -111,12 +111,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, 
a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -148,12 +148,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -189,12 +189,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -233,12 +233,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -273,12 +273,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -317,12 +317,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; 
+step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -364,12 +364,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-insert s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -392,12 +392,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-insert-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -420,12 +420,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-update s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -448,12 +448,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-delete s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -476,12 +476,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-truncate s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, 
e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -505,12 +505,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-drop s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -530,12 +530,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -554,7 +554,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -565,12 +565,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; @@ -602,12 +602,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-disable-binary-protocol s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', 
append_to_shard xxxxx); step s1-begin: BEGIN; step s1-disable-binary-protocol: -- Workaround router-select blocking blocking create-index-concurrently @@ -630,7 +630,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -641,12 +641,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -666,7 +666,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -677,12 +677,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; @@ -714,12 +714,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -739,7 +739,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) 
+(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -750,12 +750,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -766,7 +766,7 @@ id|data|int_data step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 16384 (1 row) step s1-commit: COMMIT; @@ -783,12 +783,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-router-select s2-master-modify-multiple-shards s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id|data|int_data @@ -811,12 +811,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-master-drop-all-shards s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-master-drop-all-shards: SELECT citus_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); citus_drop_all_shards @@ -838,9 +838,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-router-select s2-distribute-table s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) step s1-drop: DROP TABLE select_append; @@ -871,12 +871,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-insert s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step 
s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -899,12 +899,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-insert-select s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -927,12 +927,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-update s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -955,12 +955,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-delete s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -983,12 +983,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-truncate s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -1011,12 +1011,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-drop s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 
2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -1032,12 +1032,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-router-select s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -1056,7 +1056,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -1067,12 +1067,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; @@ -1104,12 +1104,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -1129,7 +1129,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -1140,12 +1140,12 @@ 
restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; @@ -1177,12 +1177,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -1202,7 +1202,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -1213,17 +1213,17 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 16384 (1 row) step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -1246,12 +1246,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard 
xxxxx); step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; @@ -1274,12 +1274,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT citus_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); citus_drop_all_shards @@ -1301,9 +1301,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-router-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) step s1-drop: DROP TABLE select_append; @@ -1334,12 +1334,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1366,12 +1366,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1398,12 +1398,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-update s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1430,12 +1430,12 @@ restore_isolation_tester_func starting permutation: 
s1-initialize s1-begin s1-real-time-select s2-delete s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1462,12 +1462,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-truncate s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1495,12 +1495,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-drop s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1524,12 +1524,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1552,7 +1552,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -1563,12 +1563,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append 
FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -1604,12 +1604,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1632,7 +1632,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -1643,12 +1643,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1672,7 +1672,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -1683,12 +1683,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -1724,12 +1724,12 
@@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1753,7 +1753,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -1764,12 +1764,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1784,7 +1784,7 @@ id|data|int_data step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 16384 (1 row) step s1-commit: COMMIT; @@ -1801,12 +1801,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-real-time-select s2-master-modify-multiple-shards s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id|data|int_data @@ -1833,9 +1833,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-real-time-select s2-distribute-table s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) step s1-drop: DROP TABLE select_append; @@ -1866,12 +1866,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-insert s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step 
s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -1898,12 +1898,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-insert-select s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -1930,12 +1930,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-update s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -1962,12 +1962,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-delete s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -1994,12 +1994,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-truncate s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -2022,12 
+2022,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-drop s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -2043,12 +2043,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-real-time-select s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -2071,7 +2071,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -2082,12 +2082,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; @@ -2123,12 +2123,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -2152,7 +2152,7 @@ count step s1-show-columns: 
SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -2163,12 +2163,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; @@ -2204,12 +2204,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -2233,7 +2233,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -2244,17 +2244,17 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 16384 (1 row) step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -2281,12 +2281,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-real-time-select s1-commit s1-select-count -create_distributed_table 
+master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; @@ -2313,9 +2313,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-real-time-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) step s1-drop: DROP TABLE select_append; @@ -2346,12 +2346,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-insert s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2381,12 +2381,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-insert-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2416,12 +2416,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-update s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2451,12 +2451,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-delete s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY 
select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2486,12 +2486,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-truncate s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2522,12 +2522,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-drop s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2554,12 +2554,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2585,7 +2585,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -2596,12 +2596,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-adaptive-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-adaptive-select: @@ -2640,12 +2640,12 @@ restore_isolation_tester_func starting 
permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2671,7 +2671,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -2682,12 +2682,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2714,7 +2714,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -2725,12 +2725,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-adaptive-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-adaptive-select: @@ -2769,12 +2769,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', 
append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2801,7 +2801,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -2812,12 +2812,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-table-size s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2835,7 +2835,7 @@ id|data|int_data|id|data|int_data step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 16384 (1 row) step s1-commit: COMMIT; @@ -2852,12 +2852,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-adaptive-select s2-master-modify-multiple-shards s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-adaptive-select: SET citus.enable_repartition_joins TO ON; @@ -2887,9 +2887,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-adaptive-select s2-distribute-table s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) step s1-drop: DROP TABLE select_append; @@ -2923,12 +2923,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-insert s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-adaptive-select: @@ -2958,12 +2958,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-insert-select s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard 
--------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-adaptive-select: @@ -2993,12 +2993,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-update s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-adaptive-select: @@ -3028,12 +3028,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-delete s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-adaptive-select: @@ -3063,12 +3063,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-truncate s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-adaptive-select: @@ -3094,12 +3094,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-drop s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; step s2-adaptive-select: @@ -3118,12 +3118,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-adaptive-select s1-commit s1-select-count 
s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-adaptive-select: @@ -3149,7 +3149,7 @@ count step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,1) +(localhost,57637,t,0) (localhost,57638,t,1) (2 rows) @@ -3160,12 +3160,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-adaptive-select s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; @@ -3204,12 +3204,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-adaptive-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-adaptive-select: @@ -3236,7 +3236,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -3247,12 +3247,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-adaptive-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH 
(format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; @@ -3291,12 +3291,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-adaptive-select s1-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-adaptive-select: @@ -3323,7 +3323,7 @@ count step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers --------------------------------------------------------------------- -(localhost,57637,t,new_column) +(localhost,57637,t,"") (localhost,57638,t,new_column) (2 rows) @@ -3334,17 +3334,17 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-table-size s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 16384 (1 row) step s2-adaptive-select: @@ -3374,12 +3374,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) -step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; +step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM select_append; step s2-adaptive-select: @@ -3409,9 +3409,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-adaptive-select s1-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 6780300 (1 row) step s1-drop: DROP TABLE select_append; diff --git a/src/test/regress/expected/isolation_truncate_vs_all.out b/src/test/regress/expected/isolation_truncate_vs_all.out index 17cf1f603..7514be591 100644 --- 
a/src/test/regress/expected/isolation_truncate_vs_all.out +++ b/src/test/regress/expected/isolation_truncate_vs_all.out @@ -1,12 +1,12 @@ Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -27,12 +27,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -53,12 +53,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -75,12 +75,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -108,12 +108,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step 
s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -142,12 +142,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY truncate_append_index ON truncate_append(id); @@ -173,12 +173,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -206,12 +206,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -240,12 +240,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -273,12 +273,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; 
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -304,12 +304,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -330,12 +330,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -361,9 +361,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-truncate s2-distribute-table s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) step s1-drop: DROP TABLE truncate_append; @@ -393,12 +393,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; @@ -419,12 +419,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE truncate_append; @@ -442,12 +442,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index 
s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); @@ -475,12 +475,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -509,12 +509,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; @@ -542,12 +542,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; @@ -576,12 +576,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step 
s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE truncate_append RENAME data TO new_column; @@ -609,18 +609,18 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('truncate_append'); citus_total_relation_size --------------------------------------------------------------------- - 32768 + 16384 (1 row) step s2-truncate: TRUNCATE truncate_append; @@ -639,12 +639,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM truncate_append; @@ -665,12 +665,12 @@ restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) -step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; +step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-drop-all-shards: SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); @@ -696,9 +696,9 @@ restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count -create_distributed_table +master_create_empty_shard --------------------------------------------------------------------- - + 5990340 (1 row) step s1-drop: DROP TABLE truncate_append; diff --git a/src/test/regress/expected/multi_alter_table_add_constraints.out b/src/test/regress/expected/multi_alter_table_add_constraints.out index a0700e790..56ef04335 100644 --- a/src/test/regress/expected/multi_alter_table_add_constraints.out +++ b/src/test/regress/expected/multi_alter_table_add_constraints.out @@ -76,6 +76,7 @@ SELECT create_distributed_table('products_append', 'product_no', 'append'); (1 row) +SELECT master_create_empty_shard('products_append') AS shardid \gset -- Can only add primary key constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' 
is not a distribution column @@ -90,7 +91,7 @@ WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. --- Error out since first and third rows have the same product_no -\COPY products_append FROM STDIN DELIMITER AS ','; +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); ERROR: duplicate key value violates unique constraint "p_key_1450033" DETAIL: Key (product_no)=(1) already exists. DROP TABLE products_append; @@ -163,6 +164,7 @@ SELECT create_distributed_table('unique_test_table_append', 'id', 'append'); (1 row) +SELECT master_create_empty_shard('unique_test_table_append') AS shardid \gset -- Can only add unique constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' is not a distribution column @@ -177,7 +179,7 @@ WARNING: table "unique_test_table_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. -- Error out. Table can not have two rows with the same id. -\COPY unique_test_table_append FROM STDIN DELIMITER AS ','; +COPY unique_test_table_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); ERROR: duplicate key value violates unique constraint "unn_id_1450067" DETAIL: Key (id)=(X) already exists. DROP TABLE unique_test_table_append; @@ -250,12 +252,13 @@ SELECT create_distributed_table('products_append', 'product_no', 'append'); (1 row) +SELECT master_create_empty_shard('products_append') AS shardid \gset -- Can add column and table check constraints ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- Error out,since the third row conflicting with the p_multi_check -\COPY products_append FROM STDIN DELIMITER AS ','; -ERROR: new row for relation "products_append_1450101" violates check constraint "p_multi_check" +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); +ERROR: new row for relation "products_append_1450101" violates check constraint "p_multi_check_1450101" DETAIL: Failing row contains (1, Product_3, 8, 10). DROP TABLE products_append; -- Check "EXCLUSION CONSTRAINT" @@ -323,6 +326,7 @@ SELECT create_distributed_table('products_append', 'product_no','append'); (1 row) +SELECT master_create_empty_shard('products_append') AS shardid \gset -- Can only add exclusion constraint on distribution column (or group of column -- including distribution column) -- Command below should error out since 'name' is not a distribution column @@ -337,7 +341,7 @@ WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. -- Error out since first and third can not pass the exclusion check. -\COPY products_append FROM STDIN DELIMITER AS ','; +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450135" DETAIL: Key (product_no, name)=(1, Product_1) conflicts with existing key (product_no, name)=(1, Product_1). 
DROP TABLE products_append; @@ -394,9 +398,10 @@ SELECT create_distributed_table('products_append', 'product_no', 'append'); (1 row) +SELECT master_create_empty_shard('products_append') AS shardid \gset ALTER TABLE products_append ALTER COLUMN name SET NOT NULL; -- Error out since name and product_no columns can not handle NULL value. -\COPY products_append FROM STDIN DELIMITER AS ','; +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); DROP TABLE products_append; -- Tests for ADD CONSTRAINT is not only subcommand CREATE TABLE products ( diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index 9d3f55ac9..a09f1fbcc 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -112,6 +112,12 @@ SELECT create_distributed_table('customer_append', 'c_custkey', 'append'); (1 row) +SELECT master_create_empty_shard('customer_append'); + master_create_empty_shard +--------------------------------------------------------------------- + 360006 +(1 row) + CREATE TABLE nation ( n_nationkey integer not null, n_name char(25) not null, @@ -155,6 +161,12 @@ SELECT create_distributed_table('part_append', 'p_partkey', 'append'); (1 row) +SELECT master_create_empty_shard('part_append'); + master_create_empty_shard +--------------------------------------------------------------------- + 360009 +(1 row) + CREATE TABLE supplier ( s_suppkey integer not null, diff --git a/src/test/regress/expected/multi_null_minmax_value_pruning.out b/src/test/regress/expected/multi_null_minmax_value_pruning.out index cbb938ebe..a531b065f 100644 --- a/src/test/regress/expected/multi_null_minmax_value_pruning.out +++ b/src/test/regress/expected/multi_null_minmax_value_pruning.out @@ -3,37 +3,48 @@ -- -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. -SET client_min_messages TO DEBUG2; +CREATE SCHEMA multi_null_minmax_value_pruning; +SET search_path TO multi_null_minmax_value_pruning; SET citus.explain_all_tasks TO on; --- to avoid differing explain output - executor doesn't matter, --- because were testing pruning here. 
--- Change configuration to treat lineitem and orders tables as large SET citus.log_multi_join_order to true; SET citus.enable_repartition_joins to ON; -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; - shardminvalue | shardmaxvalue +SET citus.next_shard_id = 290000; +CREATE TABLE lineitem (LIKE public.lineitem); +SELECT create_distributed_table('lineitem', 'l_orderkey', 'range'); + create_distributed_table --------------------------------------------------------------------- - 1 | 1000 + (1 row) -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; - shardminvalue | shardmaxvalue +SELECT master_create_empty_shard('lineitem') as lineitem_shardid1 \gset +SELECT master_create_empty_shard('lineitem') as lineitem_shardid2 \gset +CREATE TABLE orders (LIKE public.orders); +SELECT create_distributed_table('orders', 'o_orderkey', 'range'); + create_distributed_table --------------------------------------------------------------------- - 1 | 1000 + (1 row) +SELECT master_create_empty_shard('orders') as orders_shardid1 \gset +SELECT master_create_empty_shard('orders') as orders_shardid2 \gset +SET client_min_messages TO DEBUG2; +UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000' WHERE shardid = :lineitem_shardid1 OR shardid = :orders_shardid1; +UPDATE pg_dist_shard SET shardminvalue = '6001', shardmaxvalue = '20000' WHERE shardid = :lineitem_shardid2 OR shardid = :orders_shardid2; +UPDATE pg_dist_partition SET colocationid = 87091 WHERE logicalrelid = 'orders'::regclass OR logicalrelid = 'lineitem'::regclass; -- Check that partition and join pruning works when min/max values exist -- Adding l_orderkey = 1 to make the query not router executable -SELECT coordinator_plan($Q$ +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; $Q$); -DEBUG: Creating router plan -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement +DEBUG: Router planner cannot handle multi-shard select queries +CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement +LOG: join order: [ "lineitem" ] +CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - Task Count: 1 + Task Count: 2 (2 rows) EXPLAIN (COSTS FALSE) @@ -41,8 +52,8 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: Router planner cannot handle multi-shard select queries LOG: join order: [ "lineitem" ][ local partition join "orders" ] -DEBUG: join prunable for intervals [-2147483648,-1] and [0,2147483647] -DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1] +DEBUG: join prunable for intervals [1,6000] and [6001,20000] +DEBUG: join prunable for intervals [6001,20000] and [1,6000] QUERY PLAN --------------------------------------------------------------------- Aggregate @@ -53,38 +64,36 @@ DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1] Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Hash Join - Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_360000 lineitem + Hash Cond: (orders.o_orderkey = lineitem.l_orderkey) + -> Seq Scan on orders_290002 orders -> Hash - -> Seq Scan on orders_360002 orders + -> Seq Scan on 
lineitem_290000 lineitem -> Task Node: host=localhost port=xxxxx dbname=regression -> Aggregate -> Hash Join - Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) - -> Seq Scan on lineitem_360001 lineitem + Hash Cond: (orders.o_orderkey = lineitem.l_orderkey) + -> Seq Scan on orders_290003 orders -> Hash - -> Seq Scan on orders_360003 orders + -> Seq Scan on lineitem_290001 lineitem (20 rows) -- Now set the minimum value for a shard to null. Then check that we don't apply -- partition or join pruning for the shard with null min value. Since it is not -- supported with single-repartition join, dual-repartition has been used. -UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; -SELECT coordinator_plan($Q$ +UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = :lineitem_shardid1; +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; $Q$); -DEBUG: Distributed planning for a fast-path router query -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement -DEBUG: Creating router plan -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement -DEBUG: query has a single distribution column value: 9030 -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement +DEBUG: Router planner cannot handle multi-shard select queries +CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement +LOG: join order: [ "lineitem" ] +CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - Task Count: 1 + Task Count: 2 (2 rows) EXPLAIN (COSTS FALSE) @@ -137,21 +146,19 @@ DETAIL: Creating dependency on merge taskId 12 -- Next, set the maximum value for another shard to null. Then check that we -- don't apply partition or join pruning for this other shard either. Since it -- is not supported with single-repartition join, dual-repartition has been used. -UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; -SELECT coordinator_plan($Q$ +UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = :lineitem_shardid2; +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; $Q$); -DEBUG: Distributed planning for a fast-path router query -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement -DEBUG: Creating router plan -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement -DEBUG: query has a single distribution column value: 9030 -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement +DEBUG: Router planner cannot handle multi-shard select queries +CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement +LOG: join order: [ "lineitem" ] +CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - Task Count: 1 + Task Count: 2 (2 rows) EXPLAIN (COSTS FALSE) @@ -204,17 +211,13 @@ DETAIL: Creating dependency on merge taskId 12 -- Last, set the minimum value to 0 and check that we don't treat it as null. 
We -- should apply partition and join pruning for this shard now. Since it is not -- supported with single-repartition join, dual-repartition has been used. -UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; -SELECT coordinator_plan($Q$ +UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = :lineitem_shardid1; +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; $Q$); -DEBUG: Distributed planning for a fast-path router query -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement DEBUG: Creating router plan -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement -DEBUG: query has a single distribution column value: 9030 -CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement +CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement coordinator_plan --------------------------------------------------------------------- Custom Scan (Citus Adaptive) @@ -268,7 +271,8 @@ DETAIL: Creating dependency on merge taskId 12 Merge Task Count: 4 (10 rows) --- Set minimum and maximum values for two shards back to their original values -UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; -UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001; -SET client_min_messages TO NOTICE; +RESET client_min_messages; +DROP SCHEMA multi_null_minmax_value_pruning CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table lineitem +drop cascades to table orders diff --git a/src/test/regress/expected/multi_repartition_join_pruning.out b/src/test/regress/expected/multi_repartition_join_pruning.out index 285263eea..8c0a26800 100644 --- a/src/test/regress/expected/multi_repartition_join_pruning.out +++ b/src/test/regress/expected/multi_repartition_join_pruning.out @@ -199,20 +199,48 @@ FROM orders, customer_append WHERE o_custkey = c_custkey AND - c_custkey < 0; + c_custkey < 0 AND c_custkey > 0; DEBUG: Router planner does not support append-partitioned tables. 
+DEBUG: join prunable for task partitionId 0 and 1 +DEBUG: join prunable for task partitionId 0 and 2 +DEBUG: join prunable for task partitionId 0 and 3 +DEBUG: join prunable for task partitionId 1 and 0 +DEBUG: join prunable for task partitionId 1 and 2 +DEBUG: join prunable for task partitionId 1 and 3 +DEBUG: join prunable for task partitionId 2 and 0 +DEBUG: join prunable for task partitionId 2 and 1 +DEBUG: join prunable for task partitionId 2 and 3 +DEBUG: join prunable for task partitionId 3 and 0 +DEBUG: join prunable for task partitionId 3 and 1 +DEBUG: join prunable for task partitionId 3 and 2 +DEBUG: pruning merge fetch taskId 1 +DETAIL: Creating dependency on merge taskId 3 +DEBUG: pruning merge fetch taskId 2 +DETAIL: Creating dependency on merge taskId 4 +DEBUG: pruning merge fetch taskId 4 +DETAIL: Creating dependency on merge taskId 6 +DEBUG: pruning merge fetch taskId 5 +DETAIL: Creating dependency on merge taskId 8 +DEBUG: pruning merge fetch taskId 7 +DETAIL: Creating dependency on merge taskId 9 +DEBUG: pruning merge fetch taskId 8 +DETAIL: Creating dependency on merge taskId 12 +DEBUG: pruning merge fetch taskId 10 +DETAIL: Creating dependency on merge taskId 12 +DEBUG: pruning merge fetch taskId 11 +DETAIL: Creating dependency on merge taskId 16 QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - Task Count: 0 + Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 2 Merge Task Count: 4 -> MapMergeJob - Map Task Count: 0 - Merge Task Count: 0 + Map Task Count: 3 + Merge Task Count: 4 (10 rows) SELECT @@ -221,8 +249,36 @@ FROM orders, customer_append WHERE o_custkey = c_custkey AND - c_custkey < 0; + c_custkey < 0 AND c_custkey > 0; DEBUG: Router planner does not support append-partitioned tables. 
+DEBUG: join prunable for task partitionId 0 and 1 +DEBUG: join prunable for task partitionId 0 and 2 +DEBUG: join prunable for task partitionId 0 and 3 +DEBUG: join prunable for task partitionId 1 and 0 +DEBUG: join prunable for task partitionId 1 and 2 +DEBUG: join prunable for task partitionId 1 and 3 +DEBUG: join prunable for task partitionId 2 and 0 +DEBUG: join prunable for task partitionId 2 and 1 +DEBUG: join prunable for task partitionId 2 and 3 +DEBUG: join prunable for task partitionId 3 and 0 +DEBUG: join prunable for task partitionId 3 and 1 +DEBUG: join prunable for task partitionId 3 and 2 +DEBUG: pruning merge fetch taskId 1 +DETAIL: Creating dependency on merge taskId 3 +DEBUG: pruning merge fetch taskId 2 +DETAIL: Creating dependency on merge taskId 4 +DEBUG: pruning merge fetch taskId 4 +DETAIL: Creating dependency on merge taskId 6 +DEBUG: pruning merge fetch taskId 5 +DETAIL: Creating dependency on merge taskId 8 +DEBUG: pruning merge fetch taskId 7 +DETAIL: Creating dependency on merge taskId 9 +DEBUG: pruning merge fetch taskId 8 +DETAIL: Creating dependency on merge taskId 12 +DEBUG: pruning merge fetch taskId 10 +DETAIL: Creating dependency on merge taskId 12 +DEBUG: pruning merge fetch taskId 11 +DETAIL: Creating dependency on merge taskId 16 count --------------------------------------------------------------------- 0 diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index debed430e..3a015a8b0 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -115,13 +115,14 @@ CREATE TABLE nation_append_search_path( n_regionkey integer not null, n_comment varchar(152) ); -SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); - master_create_distributed_table +SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); + create_distributed_table --------------------------------------------------------------------- (1 row) -\copy nation_append_search_path FROM STDIN with delimiter '|'; +SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset +copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid); -- create shard with master_create_worker_shards CREATE TABLE test_schema_support.nation_hash( n_nationkey integer not null, diff --git a/src/test/regress/expected/non_colocated_join_order.out b/src/test/regress/expected/non_colocated_join_order.out deleted file mode 100644 index 5d646633d..000000000 --- a/src/test/regress/expected/non_colocated_join_order.out +++ /dev/null @@ -1,52 +0,0 @@ --- --- NON_COLOCATED_JOIN_ORDER --- --- Tests to check placements of shards must be equal to choose local join logic. 
-CREATE TABLE test_table_1(id int, value_1 int); -SELECT master_create_distributed_table('test_table_1', 'id', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\copy test_table_1 FROM STDIN DELIMITER ',' -\copy test_table_1 FROM STDIN DELIMITER ',' -CREATE TABLE test_table_2(id int, value_1 int); -SELECT master_create_distributed_table('test_table_2', 'id', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - -(1 row) - -\copy test_table_2 FROM STDIN DELIMITER ',' -\copy test_table_2 FROM STDIN DELIMITER ',' -SET citus.log_multi_join_order to TRUE; -SET client_min_messages to DEBUG1; -SET citus.enable_repartition_joins TO on; --- when joining append tables we always get dual re-partition joins -SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; -LOG: join order: [ "test_table_1" ][ dual partition join "test_table_2" ] - count ---------------------------------------------------------------------- - 6 -(1 row) - --- Add two shards placement of interval [8,10] to test_table_1 -SET citus.shard_replication_factor to 2; -\copy test_table_1 FROM STDIN DELIMITER ',' --- Add two shards placement of interval [8,10] to test_table_2 -SET citus.shard_replication_factor to 1; -\copy test_table_2 FROM STDIN DELIMITER ',' --- Although shard interval of relation are same, since they have different amount of placements --- for interval [8,10] repartition join logic will be triggered. -SET citus.enable_repartition_joins to ON; -SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; -LOG: join order: [ "test_table_1" ][ dual partition join "test_table_2" ] - count ---------------------------------------------------------------------- - 9 -(1 row) - -SET client_min_messages TO default; -DROP TABLE test_table_1; -DROP TABLE test_table_2; diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index 4e0e9f98a..d23eb5600 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -625,6 +625,19 @@ BEGIN; 32 INSERT INTO test SELECT i,i FROM generate_series(0,100)i; ROLLBACK; +-- master_create_empty_shard on coordinator +BEGIN; +CREATE TABLE append_table (a INT, b INT); +SELECT create_distributed_table('append_table','a','append'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT master_create_empty_shard('append_table'); +NOTICE: Creating placements for the append partitioned tables on the coordinator is not supported, skipping coordinator ... +ERROR: could only create 0 of 1 of required shard replicas +END; -- alter table inside a tx block BEGIN; ALTER TABLE test ADD COLUMN z single_node.new_type; diff --git a/src/test/regress/expected/subquery_append.out b/src/test/regress/expected/subquery_append.out index 1316bb2e6..493c0bc37 100644 --- a/src/test/regress/expected/subquery_append.out +++ b/src/test/regress/expected/subquery_append.out @@ -8,18 +8,9 @@ SELECT create_distributed_table('append_table', 'key', 'append'); (1 row) -SELECT 1 FROM master_create_empty_shard('append_table'); - ?column? ---------------------------------------------------------------------- - 1 -(1 row) - -SELECT 1 FROM master_create_empty_shard('append_table'); - ?column? 
---------------------------------------------------------------------- - 1 -(1 row) - +SELECT master_create_empty_shard('append_table') AS shardid1 \gset +SELECT master_create_empty_shard('append_table') AS shardid2 \gset +SELECT master_create_empty_shard('append_table') AS shardid3 \gset CREATE TABLE ref_table (value int); CREATE INDEX ON ref_table (value); SELECT create_reference_table('ref_table'); @@ -28,9 +19,9 @@ SELECT create_reference_table('ref_table'); (1 row) -\COPY append_table (key,value) FROM STDIN WITH CSV -\COPY append_table (key,value) FROM STDIN WITH CSV -\COPY ref_table FROM STDIN WITH CSV +COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid1); +COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid2); +COPY ref_table FROM STDIN WITH CSV; -- exercise some optimizer pushdown features with subqueries SELECT count(*) FROM (SELECT random() FROM append_table) u; count diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out index 29c195d34..ee0e93999 100644 --- a/src/test/regress/expected/upgrade_basic_after.out +++ b/src/test/regress/expected/upgrade_basic_after.out @@ -69,7 +69,7 @@ SELECT logicalrelid FROM pg_dist_partition t_ab r tr - t_append + t_range (6 rows) SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4::bit(8) @@ -80,14 +80,14 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4 relnamespace='upgrade_basic'::regnamespace AND tgname LIKE 'truncate_trigger_%' ORDER BY tgrelid::regclass; - tgrelid | tgfoid | tgisinternal | tgenabled | tgtype + tgrelid | tgfoid | tgisinternal | tgenabled | tgtype --------------------------------------------------------------------- - t | citus_truncate_trigger | t | O | 00100000 - tp | citus_truncate_trigger | t | O | 00100000 - t_ab | citus_truncate_trigger | t | O | 00100000 - r | citus_truncate_trigger | t | O | 00100000 - tr | citus_truncate_trigger | t | O | 00100000 - t_append | citus_truncate_trigger | t | O | 00100000 + t | citus_truncate_trigger | t | O | 00100000 + tp | citus_truncate_trigger | t | O | 00100000 + t_ab | citus_truncate_trigger | t | O | 00100000 + r | citus_truncate_trigger | t | O | 00100000 + tr | citus_truncate_trigger | t | O | 00100000 + t_range | citus_truncate_trigger | t | O | 00100000 (6 rows) SELECT * FROM t ORDER BY a; @@ -305,7 +305,7 @@ SELECT * FROM t3 ORDER BY a; (3 rows) SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard - WHERE logicalrelid = 't_append'::regclass + WHERE logicalrelid = 't_range'::regclass ORDER BY shardminvalue, shardmaxvalue; shardminvalue | shardmaxvalue --------------------------------------------------------------------- @@ -313,7 +313,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard 5 | 7 (2 rows) -SELECT * FROM t_append ORDER BY id; +SELECT * FROM t_range ORDER BY id; id | value_1 --------------------------------------------------------------------- 1 | 2 @@ -324,9 +324,11 @@ SELECT * FROM t_append ORDER BY id; 7 | 4 (6 rows) -\copy t_append FROM STDIN DELIMITER ',' +SELECT master_create_empty_shard('t_range') AS new_shard_id \gset +UPDATE pg_dist_shard SET shardminvalue = '9', shardmaxvalue = '11' WHERE shardid = :new_shard_id; +\copy t_range FROM STDIN with (DELIMITER ',') SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard - WHERE logicalrelid = 't_append'::regclass + WHERE logicalrelid = 't_range'::regclass ORDER BY shardminvalue, shardmaxvalue; shardminvalue | 
shardmaxvalue --------------------------------------------------------------------- @@ -335,7 +337,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard 9 | 11 (3 rows) -SELECT * FROM t_append ORDER BY id; +SELECT * FROM t_range ORDER BY id; id | value_1 --------------------------------------------------------------------- 1 | 2 diff --git a/src/test/regress/expected/upgrade_basic_before.out b/src/test/regress/expected/upgrade_basic_before.out index 11c1701eb..28a358428 100644 --- a/src/test/regress/expected/upgrade_basic_before.out +++ b/src/test/regress/expected/upgrade_basic_before.out @@ -69,12 +69,16 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; -CREATE TABLE t_append(id int, value_1 int); -SELECT master_create_distributed_table('t_append', 'id', 'append'); - master_create_distributed_table +CREATE TABLE t_range(id int, value_1 int); +SELECT create_distributed_table('t_range', 'id', 'range'); + create_distributed_table --------------------------------------------------------------------- (1 row) -\copy t_append FROM STDIN DELIMITER ',' -\copy t_append FROM STDIN DELIMITER ',' +SELECT master_create_empty_shard('t_range') as shardid1 \gset +SELECT master_create_empty_shard('t_range') as shardid2 \gset +UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid1; +UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2; +\copy t_range FROM STDIN with (DELIMITER ',') +\copy t_range FROM STDIN with (DELIMITER ',') diff --git a/src/test/regress/expected/upgrade_pg_dist_object_test_after.out b/src/test/regress/expected/upgrade_pg_dist_object_test_after.out index eb45cf0f1..616b4fc32 100644 --- a/src/test/regress/expected/upgrade_pg_dist_object_test_after.out +++ b/src/test/regress/expected/upgrade_pg_dist_object_test_after.out @@ -24,7 +24,7 @@ drop cascades to table upgrade_basic.t_ab drop cascades to table upgrade_basic.t2 drop cascades to table upgrade_basic.r drop cascades to table upgrade_basic.tr -drop cascades to table upgrade_basic.t_append +drop cascades to table upgrade_basic.t_range -- as we updated citus to available version, -- "isn" extension -- "new_schema" schema diff --git a/src/test/regress/input/multi_agg_type_conversion.source b/src/test/regress/input/multi_agg_type_conversion.source index 34daa58a0..8c7413bc2 100644 --- a/src/test/regress/input/multi_agg_type_conversion.source +++ b/src/test/regress/input/multi_agg_type_conversion.source @@ -2,7 +2,6 @@ -- MULTI_AGG_TYPE_CONVERSION -- - -- Test aggregate type conversions using sums of integers and division operator SELECT sum(l_suppkey) FROM lineitem; SELECT sum(l_suppkey) / 2 FROM lineitem; @@ -19,8 +18,9 @@ CREATE TABLE aggregate_type ( double_value float(40) not null, interval_value interval not null); SELECT create_distributed_table('aggregate_type', 'float_value', 'append'); +SELECT master_create_empty_shard('aggregate_type') AS shardid \gset -\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' +copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid); -- Test conversions using aggregates on floats and division diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index f0a7394c4..f9cddb86b 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ 
b/src/test/regress/input/multi_alter_table_statements.source @@ -29,7 +29,8 @@ CREATE TABLE lineitem_alter ( ) WITH ( fillfactor = 80 ); SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); -\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT master_create_empty_shard('lineitem_alter') AS shardid \gset +copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); -- verify that the storage options made it to the table definitions SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; @@ -65,7 +66,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; -- \copy to verify that default values take effect -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT master_create_empty_shard('lineitem_alter') as shardid \gset +copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; @@ -80,7 +82,10 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; -- \copy should fail because it will try to insert NULLs for a NOT NULL column -- Note, this operation will create a table on the workers but it won't be in the metadata -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +BEGIN; +SELECT master_create_empty_shard('lineitem_alter') as shardid \gset +copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); +END; -- Verify that DROP NOT NULL works @@ -88,7 +93,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; -- \copy should succeed now -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT master_create_empty_shard('lineitem_alter') as shardid \gset +copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); SELECT count(*) from lineitem_alter; -- Verify that SET DATA 
TYPE works diff --git a/src/test/regress/input/multi_append_table_to_shard.source b/src/test/regress/input/multi_append_table_to_shard.source index 37e6203cf..9d459c3c1 100644 --- a/src/test/regress/input/multi_append_table_to_shard.source +++ b/src/test/regress/input/multi_append_table_to_shard.source @@ -20,6 +20,8 @@ CREATE TABLE multi_append_table_to_shard_left left_text TEXT not null ); SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); +SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid1 \gset +SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid2 \gset CREATE TABLE multi_append_table_to_shard_right_reference_hash ( @@ -32,8 +34,8 @@ SELECT create_distributed_table('multi_append_table_to_shard_right_reference_has -- Replicate 'left' table on both workers SELECT set_config('citus.shard_replication_factor', '2', false); -\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid1); +copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid2); -- Place 'right' table on both workers \copy multi_append_table_to_shard_right_reference FROM '@abs_srcdir@/data/agg.data' diff --git a/src/test/regress/input/multi_copy.source b/src/test/regress/input/multi_copy.source index d624bc0ee..058b693f6 100644 --- a/src/test/regress/input/multi_copy.source +++ b/src/test/regress/input/multi_copy.source @@ -35,6 +35,9 @@ COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; notinteger,customernot \. +-- Test invalid option +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN (append_to_shard 1); + -- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; @@ -231,46 +234,55 @@ CREATE TABLE customer_copy_append ( c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117)); -SELECT master_create_distributed_table('customer_copy_append', 'c_custkey', 'append'); +SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append'); -- Test syntax error -COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); +BEGIN; +SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset +COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); 1,customer1 2,customer2 notinteger,customernot \. +END; -- Test that no shard is created for failing copy SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; -- Test empty copy -COPY customer_copy_append FROM STDIN; +BEGIN; +SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset +COPY customer_copy_append FROM STDIN WITH (append_to_shard :shardid); \. +END; --- Test that no shard is created for copying zero rows +-- Test that a shard is created SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; -- Test proper copy -COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); +BEGIN; +SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset +COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); 1,customer1 2,customer2 \. 
+END; -- Check whether data was copied properly SELECT * FROM customer_copy_append; -- Manipulate manipulate and check shard statistics for append-partitioned table shard -UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560131; -UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560131; +UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560132; +UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560132; -SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; -SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; +SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132; +SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132; -- Update shard statistics for append-partitioned shard -SELECT master_update_shard_statistics(560131); +SELECT master_update_shard_statistics(560132); -SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; -SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; +SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132; +SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132; -- Create lineitem table CREATE TABLE lineitem_copy_append ( @@ -290,33 +302,18 @@ CREATE TABLE lineitem_copy_append ( l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null); -SELECT master_create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append'); +SELECT create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append'); --- Test multiple shard creation -SET citus.shard_max_size TO '256kB'; - -COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'; +BEGIN; +SELECT master_create_empty_shard('lineitem_copy_append') AS shardid \gset +COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); +END; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass; --- Test round robin shard policy -SET citus.shard_replication_factor TO 1; - -COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'; - -SELECT - pg_dist_shard_placement.shardid, - pg_dist_shard_placement.nodeport -FROM - pg_dist_shard, - pg_dist_shard_placement -WHERE - pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND - logicalrelid = 'lineitem_copy_append'::regclass -ORDER BY - pg_dist_shard.shardid DESC -LIMIT - 5; +-- trigger some errors on the append_to_shard option +COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 1); +COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 560000); -- Test schema support on append partitioned tables CREATE SCHEMA append; @@ -330,11 +327,13 @@ CREATE TABLE append.customer_copy ( c_mktsegment char(10), c_comment varchar(117)); -SELECT master_create_distributed_table('append.customer_copy', 'c_custkey', 'append'); +SELECT create_distributed_table('append.customer_copy', 'c_custkey', 'append'); +SELECT master_create_empty_shard('append.customer_copy') AS shardid1 \gset +SELECT master_create_empty_shard('append.customer_copy') AS shardid2 \gset -- Test copy from the master node -COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|'); 
-COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|'); +COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard :shardid1); +COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid2); -- Test the content of the table SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy; @@ -421,8 +420,10 @@ CREATE TABLE packed_numbers_append ( packed_numbers number_pack[] ); -SELECT master_create_distributed_table('packed_numbers_append', 'id', 'append'); -COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite'; +SELECT create_distributed_table('packed_numbers_append', 'id', 'append'); +SELECT master_create_empty_shard('packed_numbers_append') AS shardid \gset + +COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite' WITH (append_to_shard :shardid); -- Verify data is actually copied SELECT * FROM packed_numbers_append; @@ -434,8 +435,10 @@ CREATE TABLE super_packed_numbers_append ( super_packed_number super_number_pack ); -SELECT master_create_distributed_table('super_packed_numbers_append', 'id', 'append'); -COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite'; +SELECT create_distributed_table('super_packed_numbers_append', 'id', 'append'); +SELECT master_create_empty_shard('super_packed_numbers_append') AS shardid \gset + +COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite' WITH (append_to_shard :shardid); -- Verify data is actually copied SELECT * FROM super_packed_numbers_append; @@ -448,9 +451,10 @@ CREATE TABLE composite_partition_column_table( composite_column number_pack ); -SELECT master_create_distributed_table('composite_partition_column_table', 'composite_column', 'append'); +SELECT create_distributed_table('composite_partition_column_table', 'composite_column', 'append'); +SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset -\COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv'); +COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); 1,"(1,1)" 2,"(2,2)" \. @@ -458,20 +462,22 @@ SELECT master_create_distributed_table('composite_partition_column_table', 'comp -- Test copy on append distributed tables do not create shards on removed workers CREATE TABLE numbers_append (a int, b int); -SELECT master_create_distributed_table('numbers_append', 'a', 'append'); +SELECT create_distributed_table('numbers_append', 'a', 'append'); -- no shards is created yet SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); +SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset +SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset + +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1); 1,1 2,2 \. -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); -3,5 +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2); 4,6 \. 
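The multi_copy.source hunks above all follow the same migration: the removed behavior, where a COPY into an append-distributed table implicitly created a fresh shard, is replaced by an explicit two-step workflow. A minimal sketch of the new pattern follows, assuming a running Citus coordinator; the table name and sample rows are illustrative and not part of the patch:

    CREATE TABLE numbers_sketch (a int, b int);
    SELECT create_distributed_table('numbers_sketch', 'a', 'append');
    -- Reserve an empty shard up front and capture its id in a psql variable.
    SELECT master_create_empty_shard('numbers_sketch') AS shardid \gset
    -- COPY now names its target shard explicitly instead of creating one.
    COPY numbers_sketch FROM PROGRAM 'echo 1,1 && echo 2,2' WITH (format 'csv', append_to_shard :shardid);

The surrounding hunks also exercise the option's error paths, such as passing append_to_shard for a hash-distributed table or with a shard id that does not match the target table (the append_to_shard 1 and 560000 cases above).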
@@ -487,12 +493,15 @@ SELECT master_disable_node('localhost', :worker_1_port);
 SET citus.shard_replication_factor TO 1;

 -- add two new shards and verify they are created at the other node
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
 5,7
 6,8
 \.

-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
 7,9
 8,10
 \.
@@ -507,12 +516,15 @@ SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
 RESET client_min_messages;
 RESET citus.shard_replication_factor;

 -- add two new shards and verify they are created at both workers
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
 9,11
 10,12
 \.

-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
 11,13
 12,14
 \.
@@ -625,10 +637,6 @@ SELECT shardid, shardstate, nodename, nodeport
 \c - :default_user - :worker_1_port
 ALTER USER test_user WITH login;

--- there is a dangling shard in worker_2, drop it
-\c - test_user - :worker_2_port
-DROP TABLE numbers_hash_other_560176;
-
 \c - test_user - :master_port
 DROP TABLE numbers_hash;
@@ -644,7 +652,7 @@ CREATE TABLE numbers_hash(a int, b int);
 SELECT create_distributed_table('numbers_hash', 'a');

 \c - - - :worker_1_port
-ALTER TABLE numbers_hash_560180 DROP COLUMN b;
+ALTER TABLE numbers_hash_560170 DROP COLUMN b;
 \c - - - :master_port

 -- operation will fail to modify a shard and roll back
diff --git a/src/test/regress/input/multi_create_schema.source b/src/test/regress/input/multi_create_schema.source
deleted file mode 100644
index 4580214b5..000000000
--- a/src/test/regress/input/multi_create_schema.source
+++ /dev/null
@@ -1,15 +0,0 @@
-
-SET citus.next_shard_id TO 250000;
-
-
-CREATE SCHEMA tpch
-CREATE TABLE nation (
-    n_nationkey integer not null,
-    n_name char(25) not null,
-    n_regionkey integer not null,
-    n_comment varchar(152));
-SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
-
-\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
-
-SELECT count(*) from tpch.nation;
diff --git a/src/test/regress/input/multi_load_data.source b/src/test/regress/input/multi_load_data.source
index 6021bcd34..b9b2d995c 100644
--- a/src/test/regress/input/multi_load_data.source
+++ b/src/test/regress/input/multi_load_data.source
@@ -18,9 +18,9 @@ SET citus.next_shard_id TO 290000;
 \copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
 \copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
+\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)
 \copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
 \copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
-\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|'
+\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009)
 \copy supplier FROM '@abs_srcdir@/data/supplier.data'
with delimiter '|' \copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' diff --git a/src/test/regress/input/multi_load_large_records.source b/src/test/regress/input/multi_load_large_records.source deleted file mode 100644 index 0614f918c..000000000 --- a/src/test/regress/input/multi_load_large_records.source +++ /dev/null @@ -1,23 +0,0 @@ --- --- MULTI_STAGE_LARGE_RECORDS --- --- Tests for loading data with large records (i.e. greater than the read buffer --- size, which is 32kB) in a distributed cluster. These tests make sure that we --- are creating shards of correct size even when records are large. - - -SET citus.next_shard_id TO 300000; - - -SET citus.shard_max_size TO "256kB"; - -CREATE TABLE large_records_table (data_id integer, data text); -SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); - -\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' - -SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class - WHERE pg_class.oid=logicalrelid AND relname='large_records_table' - ORDER BY shardid; - -RESET citus.shard_max_size; diff --git a/src/test/regress/input/multi_load_more_data.source b/src/test/regress/input/multi_load_more_data.source index a6bd78b29..a8d2bab81 100644 --- a/src/test/regress/input/multi_load_more_data.source +++ b/src/test/regress/input/multi_load_more_data.source @@ -14,9 +14,15 @@ SET citus.next_shard_id TO 280000; \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' -\copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -\copy part_append FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' +SELECT master_create_empty_shard('customer_append') AS shardid1 \gset +SELECT master_create_empty_shard('customer_append') AS shardid2 \gset + +copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1); +copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2); + +SELECT master_create_empty_shard('part_append') AS shardid \gset + +copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid); -- Exchange partition files in binary format in remaining tests ALTER SYSTEM SET citus.binary_worker_copy_format TO on; diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index a43391504..ba736f9c7 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -44,7 +44,6 @@ test: isolation_create_distributed_table isolation_master_append_table test: isolation_multi_shard_modify_vs_all test: isolation_modify_with_subquery_vs_dml test: isolation_hash_copy_vs_all -test: isolation_append_copy_vs_all test: isolation_range_copy_vs_all test: isolation_partitioned_copy_vs_all test: isolation_select_vs_all diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 29ea6e496..dabe94529 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -127,12 +127,6 @@ test: with_modifying cte_prepared_modify cte_nested_modification test: ensure_no_intermediate_data_leak test: with_executors with_join with_partitioning with_transactions with_dml - -# ---------- -# Tests to check our large record loading and shard 
deletion behavior -# ---------- -test: multi_load_large_records - # ---------- # Tests around DDL statements run on distributed tables # ---------- @@ -140,12 +134,6 @@ test: multi_index_statements test: multi_alter_table_statements test: multi_alter_table_add_constraints -# ---------- -# multi_create_schema tests creation, loading, and querying of a table in a new -# schema (namespace). -# ---------- -test: multi_create_schema - # ---------- # Tests to check if we inform the user about potential caveats of creating new # databases, schemas, roles, and authentication information. diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 34dabd565..715b032a5 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -50,7 +50,7 @@ test: set_operation_and_local_tables test: subqueries_deep subquery_view subquery_partitioning subqueries_not_supported test: subquery_in_targetlist subquery_in_where subquery_complex_target_list subquery_append test: subquery_prepared_statements -test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order +test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins test: cte_inline recursive_view_local_table values test: pg13 pg12 # run pg14 sequentially as it syncs metadata diff --git a/src/test/regress/multi_schedule_hyperscale b/src/test/regress/multi_schedule_hyperscale index 92fd948ef..5d3e5b622 100644 --- a/src/test/regress/multi_schedule_hyperscale +++ b/src/test/regress/multi_schedule_hyperscale @@ -53,7 +53,6 @@ test: insert_select_connection_leak # ---------- test: subquery_basics subquery_local_tables subquery_executors set_operations set_operation_and_local_tables test: subquery_partitioning subquery_complex_target_list subqueries_not_supported -test: non_colocated_join_order test: subquery_prepared_statements pg12 cte_inline # ---------- @@ -114,8 +113,6 @@ test: with_executors with_partitioning with_dml # ---------- # Tests to check our large record loading and shard deletion behavior # ---------- -test: multi_load_large_records -test: multi_master_delete_protocol test: multi_shard_modify # ---------- diff --git a/src/test/regress/multi_schedule_hyperscale_superuser b/src/test/regress/multi_schedule_hyperscale_superuser index 17977537f..b5a73dba9 100644 --- a/src/test/regress/multi_schedule_hyperscale_superuser +++ b/src/test/regress/multi_schedule_hyperscale_superuser @@ -59,7 +59,7 @@ test: multi_partitioning_utils # ---------- test: subquery_local_tables subquery_executors subquery_and_cte set_operations set_operation_and_local_tables test: subqueries_deep subquery_view subquery_partitioning subqueries_not_supported subquery_in_where -test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order +test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins test: subquery_prepared_statements pg12 cte_inline # ---------- @@ -122,8 +122,6 @@ test: with_executors with_partitioning with_dml # ---------- # Tests to check our large record loading and shard deletion behavior # ---------- -test: multi_load_large_records -test: multi_master_delete_protocol test: multi_shard_modify # ---------- @@ -131,12 +129,6 @@ test: multi_shard_modify # ---------- test: multi_alter_table_add_constraints -# ---------- -# multi_create_schema tests creation, loading, and querying of a table in a new -# schema (namespace). 
-# ---------- -test: multi_create_schema - # ---------- # Tests to check the sequential and parallel executions of DDL and modification # commands diff --git a/src/test/regress/output/multi_agg_type_conversion.source b/src/test/regress/output/multi_agg_type_conversion.source index 5fc377c69..14c9265e0 100644 --- a/src/test/regress/output/multi_agg_type_conversion.source +++ b/src/test/regress/output/multi_agg_type_conversion.source @@ -39,7 +39,8 @@ SELECT create_distributed_table('aggregate_type', 'float_value', 'append'); (1 row) -\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' +SELECT master_create_empty_shard('aggregate_type') AS shardid \gset +copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid); -- Test conversions using aggregates on floats and division SELECT min(float_value), max(float_value), sum(float_value), count(float_value), avg(float_value) diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index 6ed766169..576d540de 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -25,23 +25,24 @@ CREATE TABLE lineitem_alter ( ) WITH ( fillfactor = 80 ); SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT master_create_empty_shard('lineitem_alter') AS shardid \gset +copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); -- verify that the storage options made it to the table definitions SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +----------------+----------------- lineitem_alter | {fillfactor=80} (1 row) \c - - - :worker_1_port SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' ORDER BY relname; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +-----------------------+----------------- lineitem_alter_220000 | {fillfactor=80} (1 row) @@ -59,8 +60,8 @@ FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; - attname | atttypid ---------------------------------------------------------------------- + attname | atttypid +-----------------+------------------- tableoid | oid cmax | cid xmax | xid @@ -92,8 +93,8 @@ ORDER BY attnum; \c - - - :master_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -110,22 +111,22 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null - float_column | double precision | - date_column | 
date | + float_column | double precision | + date_column | date | int_column1 | integer | default 1 int_column2 | integer | default 2 - null_column | integer | + null_column | integer | (21 rows) SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; - float_column | count ---------------------------------------------------------------------- + float_column | count +--------------+------- | 6000 (1 row) SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; - int_column1 | count ---------------------------------------------------------------------- + int_column1 | count +-------------+------- 1 | 6000 (1 row) @@ -133,17 +134,18 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; -- \copy to verify that default values take effect -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT master_create_empty_shard('lineitem_alter') as shardid \gset +copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; - float_column | count ---------------------------------------------------------------------- + float_column | count +--------------+------- | 6000 1 | 6000 (2 rows) SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; - int_column1 | count ---------------------------------------------------------------------- + int_column1 | count +-------------+------- | 6000 1 | 6000 (2 rows) @@ -151,8 +153,8 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; -- Verify that SET NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+-------------------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -170,24 +172,27 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipmode | character(10) | not null l_comment | character varying(44) | not null float_column | double precision | default 1 - date_column | date | - int_column1 | integer | + date_column | date | + int_column1 | integer | int_column2 | integer | not null default 2 - null_column | integer | + null_column | integer | (21 rows) -- Drop default so that NULLs will be inserted for this column ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; -- \copy should fail because it will try to insert NULLs for a NOT NULL column -- Note, this operation will create a table on the workers but it won't be in the metadata -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, 
l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -ERROR: null value in column "int_column2" violates not-null constraint +BEGIN; +SELECT master_create_empty_shard('lineitem_alter') as shardid \gset +copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); +ERROR: null value in column "int_column2" of relation "lineitem_alter_220002" violates not-null constraint DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). +END; -- Verify that DROP NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -205,32 +210,33 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipmode | character(10) | not null l_comment | character varying(44) | not null float_column | double precision | default 1 - date_column | date | - int_column1 | integer | - int_column2 | integer | - null_column | integer | + date_column | date | + int_column1 | integer | + int_column2 | integer | + null_column | integer | (21 rows) -- \copy should succeed now -\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' +SELECT master_create_empty_shard('lineitem_alter') as shardid \gset +copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); SELECT count(*) from lineitem_alter; - count ---------------------------------------------------------------------- + count +------- 18000 (1 row) -- Verify that SET DATA TYPE works SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; - int_column2 | pg_typeof | count ---------------------------------------------------------------------- + int_column2 | pg_typeof | count +-------------+-----------+------- | integer | 6000 2 | integer | 12000 (2 rows) ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -248,15 +254,15 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE 
relid='public.lineite l_shipmode | character(10) | not null l_comment | character varying(44) | not null float_column | double precision | default 1 - date_column | date | - int_column1 | integer | - int_column2 | double precision | - null_column | integer | + date_column | date | + int_column1 | integer | + int_column2 | double precision | + null_column | integer | (21 rows) SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; - int_column2 | pg_typeof | count ---------------------------------------------------------------------- + int_column2 | pg_typeof | count +-------------+------------------+------- | double precision | 6000 2 | double precision | 12000 (2 rows) @@ -268,8 +274,8 @@ ALTER TABLE lineitem_alter DROP COLUMN date_column; -- Verify that RENAME COLUMN works ALTER TABLE lineitem_alter RENAME COLUMN l_orderkey TO l_orderkey_renamed; SELECT SUM(l_orderkey_renamed) FROM lineitem_alter; - sum ---------------------------------------------------------------------- + sum +---------- 53620791 (1 row) @@ -287,14 +293,14 @@ ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2; -- Verify with IF EXISTS for extant table ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey; SELECT SUM(l_orderkey) FROM lineitem_alter; - sum ---------------------------------------------------------------------- + sum +---------- 53620791 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -311,15 +317,15 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null - null_column | integer | + null_column | integer | (17 rows) -- Verify that we can execute commands with multiple subcommands ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER, ADD COLUMN int_column2 INTEGER; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -336,9 +342,9 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null - null_column | integer | - int_column1 | integer | - int_column2 | integer | + null_column | integer | + int_column1 | integer | + int_column2 | integer | (19 rows) ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER, @@ -347,8 +353,8 @@ ERROR: alter table command is currently unsupported DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VALIDATE CONSTRAINT, SET (), RESET (), ATTACH|DETACH PARTITION and TYPE subcommands are supported. 
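-- Aside for readers of these expected outputs: table_desc is a helper defined
-- elsewhere in the regression suite, not in this patch. As a rough sketch only
-- (a hypothetical approximation, not the suite's actual definition), the same
-- shape can be produced from the system catalogs:
--
--   CREATE VIEW table_desc AS
--   SELECT a.attrelid AS relid,
--          a.attname AS "Column",
--          format_type(a.atttypid, a.atttypmod) AS "Type",
--          trim(concat_ws(' ',
--               CASE WHEN a.attnotnull THEN 'not null' END,
--               CASE WHEN d.adbin IS NOT NULL
--                    THEN 'default ' || pg_get_expr(d.adbin, d.adrelid) END)) AS "Modifiers"
--   FROM pg_attribute a
--   LEFT JOIN pg_attrdef d ON d.adrelid = a.attrelid AND d.adnum = a.attnum
--   WHERE a.attnum > 0 AND NOT a.attisdropped;
--
-- which yields modifier strings like the "not null default 2" seen above.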
ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -365,7 +371,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null - null_column | integer | + null_column | integer | (17 rows) -- Verify that we cannot execute alter commands on the distribution column @@ -386,11 +392,13 @@ DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VAL -- types ALTER TABLE lineitem_alter ADD COLUMN new_column non_existent_type; ERROR: type "non_existent_type" does not exist +LINE 1: ALTER TABLE lineitem_alter ADD COLUMN new_column non_existen... + ^ ALTER TABLE lineitem_alter ALTER COLUMN null_column SET NOT NULL; -ERROR: column "null_column" contains null values -CONTEXT: while executing command on localhost:xxxxx +ERROR: column "null_column" of relation "lineitem_alter_220000" contains null values +CONTEXT: while executing command on localhost:57637 ALTER TABLE lineitem_alter ALTER COLUMN l_partkey SET DEFAULT 'a'; -ERROR: invalid input syntax for integer: "a" +ERROR: invalid input syntax for type integer: "a" -- Verify that we error out on RENAME CONSTRAINT statement ALTER TABLE lineitem_alter RENAME CONSTRAINT constraint_a TO constraint_b; ERROR: renaming constraints belonging to distributed tables is currently unsupported @@ -404,8 +412,8 @@ NOTICE: relation "non_existent_table" does not exist, skipping -- Verify that none of the failed alter table commands took effect on the master -- node SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -422,7 +430,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null - null_column | integer | + null_column | integer | (17 rows) -- verify that non-propagated ddl commands are allowed inside a transaction block @@ -431,8 +439,8 @@ BEGIN; CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +--------------+---------------- temp_index_1 | lineitem_alter (1 row) @@ -443,8 +451,8 @@ BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +--------------+---------------- temp_index_2 | lineitem_alter (1 row) @@ -455,8 +463,8 @@ CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ALTER TABLE 
lineitem_alter ADD COLUMN first integer; COMMIT; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; - Column | Type | Modifiers ---------------------------------------------------------------------- + Column | Type | Modifiers +-----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null @@ -473,14 +481,14 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null - null_column | integer | - first | integer | + null_column | integer | + first | integer | (18 rows) SELECT "Column", "Type", "Definition" FROM index_attrs WHERE relid = 'temp_index_2'::regclass; - Column | Type | Definition ---------------------------------------------------------------------- + Column | Type | Definition +------------+--------+------------ l_orderkey | bigint | l_orderkey (1 row) @@ -492,8 +500,8 @@ CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-----------+----------- (0 rows) -- ensure that errors cause full rollback @@ -503,8 +511,8 @@ CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ERROR: relation "temp_index_2" already exists ROLLBACK; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-----------+----------- (0 rows) -- verify that SAVEPOINT is allowed... @@ -521,8 +529,8 @@ CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK TO my_savepoint; COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +--------------+---------------- temp_index_2 | lineitem_alter (1 row) @@ -536,12 +544,12 @@ BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ALTER TABLE lineitem_alter ADD COLUMN first integer; ERROR: column "first" of relation "lineitem_alter_220000" already exists -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:57638 COMMIT; -- Nothing from the block should have committed SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-----------+----------- (0 rows) -- Create single-shard table (to avoid deadlocks in the upcoming test hackery) @@ -549,78 +557,78 @@ CREATE TABLE single_shard_items (id integer NOT NULL, name text); SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('single_shard_items', 'id', 'hash'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- Verify that ALTER TABLE .. REPLICATION IDENTITY [USING INDEX]* .. 
works CREATE UNIQUE INDEX replica_idx on single_shard_items(id); SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident ---------------------------------------------------------------------- + relreplident +-------------- d (1 row) SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +------------------------ (localhost,57637,t,d) (localhost,57638,t,d) (2 rows) ALTER TABLE single_shard_items REPLICA IDENTITY nothing; SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident ---------------------------------------------------------------------- + relreplident +-------------- n (1 row) SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +------------------------ (localhost,57637,t,n) (localhost,57638,t,n) (2 rows) ALTER TABLE single_shard_items REPLICA IDENTITY full; SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident ---------------------------------------------------------------------- + relreplident +-------------- f (1 row) SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +------------------------ (localhost,57637,t,f) (localhost,57638,t,f) (2 rows) ALTER TABLE single_shard_items REPLICA IDENTITY USING INDEX replica_idx; SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident ---------------------------------------------------------------------- + relreplident +-------------- i (1 row) SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +------------------------ (localhost,57637,t,i) (localhost,57638,t,i) (2 rows) ALTER TABLE single_shard_items REPLICA IDENTITY default, REPLICA IDENTITY USING INDEX replica_idx, REPLICA IDENTITY nothing; SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items'; - relreplident ---------------------------------------------------------------------- + relreplident +-------------- n (1 row) SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;'); - run_command_on_workers ---------------------------------------------------------------------- + run_command_on_workers +------------------------ (localhost,57637,t,n) (localhost,57638,t,n) (2 rows) @@ -650,11 +658,11 @@ CREATE INDEX single_index_3 ON single_shard_items(name); COMMIT; ERROR: duplicate key value violates unique constraint "ddl_commands_command_key" DETAIL: Key (command)=(CREATE INDEX) already exists. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:57638 -- Nothing from the block should have committed SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-----------+----------- (0 rows) -- Even if 1PC is picked for multi-shard commands @@ -666,11 +674,11 @@ CREATE INDEX single_index_3 ON single_shard_items(name); COMMIT; ERROR: duplicate key value violates unique constraint "ddl_commands_command_key" DETAIL: Key (command)=(CREATE INDEX) already exists. -CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:57638 -- Nothing from the block should have committed SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-----------+----------- (0 rows) \c - - - :worker_2_port @@ -682,8 +690,8 @@ DROP TABLE ddl_commands; BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); SELECT count(*) FROM lineitem_alter; - count ---------------------------------------------------------------------- + count +------- 18000 (1 row) @@ -691,16 +699,16 @@ ROLLBACK; -- and before BEGIN; SELECT count(*) FROM lineitem_alter; - count ---------------------------------------------------------------------- + count +------- 18000 (1 row) CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +--------------+---------------- temp_index_2 | lineitem_alter (1 row) @@ -709,15 +717,15 @@ DROP INDEX temp_index_2; SET citus.multi_shard_commit_protocol TO '2pc'; CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +--------------+---------------- temp_index_3 | lineitem_alter (1 row) DROP INDEX temp_index_3; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-----------+----------- (0 rows) RESET citus.multi_shard_commit_protocol; @@ -725,9 +733,9 @@ RESET citus.multi_shard_commit_protocol; CREATE TABLE test_ab (a int, b int); SET citus.shard_count TO 8; SELECT create_distributed_table('test_ab', 'a', 'hash'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) INSERT INTO test_ab VALUES (2, 10); @@ -735,11 +743,11 @@ INSERT INTO test_ab VALUES (2, 11); CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a); ERROR: could not create unique index "temp_unique_index_1_220011" DETAIL: Key (a)=(2) is duplicated. 
-CONTEXT: while executing command on localhost:xxxxx +CONTEXT: while executing command on localhost:57638 SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard WHERE logicalrelid='test_ab'::regclass AND shardstate=3; - shardid ---------------------------------------------------------------------- + shardid +--------- (0 rows) -- Check that the schema on the worker still looks reasonable @@ -749,8 +757,8 @@ FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; - attname | atttypid ---------------------------------------------------------------------- + attname | atttypid +-------------------------------+------------------- tableoid | oid cmax | cid xmax | xid @@ -786,24 +794,24 @@ ORDER BY attnum; \c - - - :master_port -- verify that we can rename distributed tables SHOW citus.enable_ddl_propagation; - citus.enable_ddl_propagation ---------------------------------------------------------------------- + citus.enable_ddl_propagation +------------------------------ on (1 row) ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; -- verify rename is performed SELECT relname FROM pg_class WHERE relname = 'lineitem_renamed'; - relname ---------------------------------------------------------------------- + relname +------------------ lineitem_renamed (1 row) -- show rename worked on one worker, too \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY relname; - relname ---------------------------------------------------------------------- + relname +------------------------- lineitem_renamed_220000 lineitem_renamed_220001 lineitem_renamed_220003 @@ -815,8 +823,8 @@ ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; -- show rename worked on one worker, too \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname; - relname ---------------------------------------------------------------------- + relname +----------------------- lineitem_alter_220000 lineitem_alter_220001 lineitem_alter_220003 @@ -826,15 +834,15 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> -- verify that we can set and reset storage parameters ALTER TABLE lineitem_alter SET(fillfactor=40); SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +----------------+----------------- lineitem_alter | {fillfactor=40} (1 row) \c - - - :worker_1_port SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +-----------------------+----------------- lineitem_alter_220000 | {fillfactor=40} lineitem_alter_220001 | {fillfactor=40} lineitem_alter_220003 | {fillfactor=40} @@ -843,18 +851,18 @@ SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AN \c - - - :master_port ALTER TABLE lineitem_alter RESET(fillfactor); SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; - relname | reloptions ---------------------------------------------------------------------- - lineitem_alter | + relname | reloptions 
+----------------+------------ + lineitem_alter | (1 row) \c - - - :worker_1_port SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname; - relname | reloptions ---------------------------------------------------------------------- - lineitem_alter_220000 | - lineitem_alter_220001 | - lineitem_alter_220003 | + relname | reloptions +-----------------------+------------ + lineitem_alter_220000 | + lineitem_alter_220001 | + lineitem_alter_220003 | (3 rows) \c - - - :master_port @@ -863,16 +871,16 @@ CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); ALTER INDEX temp_index_1 RENAME TO idx_lineitem_linenumber; -- verify rename is performed SELECT relname FROM pg_class WHERE relname = 'idx_lineitem_linenumber'; - relname ---------------------------------------------------------------------- + relname +------------------------- idx_lineitem_linenumber (1 row) -- show rename worked on one worker, too \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER BY relname; - relname ---------------------------------------------------------------------- + relname +-------------------------------- idx_lineitem_linenumber_220000 idx_lineitem_linenumber_220001 idx_lineitem_linenumber_220003 @@ -887,8 +895,8 @@ SET citus.enable_ddl_propagation to false; ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; -- verify rename is performed SELECT relname FROM pg_class WHERE relname = 'lineitem_alter' or relname = 'lineitem_renamed'; - relname ---------------------------------------------------------------------- + relname +------------------ lineitem_renamed (1 row) @@ -900,21 +908,23 @@ ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int; \c - - - :worker_1_port SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0; ERROR: column "column_only_added_to_master" does not exist +LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_22000... 
+ ^ \c - - - :master_port -- ddl propagation flag is reset to default, disable it again SET citus.enable_ddl_propagation to false; -- following query succeeds since it accesses an previously existing column SELECT l_orderkey FROM lineitem_alter LIMIT 0; - l_orderkey ---------------------------------------------------------------------- + l_orderkey +------------ (0 rows) -- make master and workers have the same schema again ALTER TABLE lineitem_alter DROP COLUMN column_only_added_to_master; -- now this should succeed SELECT * FROM lineitem_alter LIMIT 0; - l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment | null_column ---------------------------------------------------------------------- + l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment | null_column +------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+----------------+------------+-----------+------------- (0 rows) -- previously unsupported statements are accepted by postgresql now @@ -929,16 +939,16 @@ ERROR: cannot execute ALTER TABLE command dropping partition column -- Citus would have prevented that. CREATE UNIQUE INDEX unique_lineitem_partkey on lineitem_alter(l_partkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-------------------------+---------------- unique_lineitem_partkey | lineitem_alter (1 row) -- verify index is not created on worker \c - - - :worker_1_port SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%'; - indexname | tablename ---------------------------------------------------------------------- + indexname | tablename +-----------+----------- (0 rows) \c - - - :master_port @@ -947,9 +957,9 @@ SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 2; CREATE TABLE sequence_deadlock_test (a serial, b serial); SELECT create_distributed_table('sequence_deadlock_test', 'a'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) BEGIN; @@ -968,9 +978,9 @@ CREATE TABLE trigger_table ( value text ); SELECT create_distributed_table('trigger_table', 'id'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- first set a trigger on a shard @@ -987,8 +997,8 @@ FOR EACH ROW EXECUTE PROCEDURE update_value(); \c - - - :master_port INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; - value | count ---------------------------------------------------------------------- + value | count +-----------------+------- trigger enabled | 1 (1 row) @@ -996,8 +1006,8 @@ ALTER TABLE trigger_table DISABLE TRIGGER ALL; ERROR: triggers are only supported for local tables added to metadata INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY 
value ORDER BY value; - value | count ---------------------------------------------------------------------- + value | count +-----------------+------- trigger enabled | 2 (1 row) @@ -1005,8 +1015,8 @@ ALTER TABLE trigger_table ENABLE TRIGGER ALL; ERROR: triggers are only supported for local tables added to metadata INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; - value | count ---------------------------------------------------------------------- + value | count +-----------------+------- trigger enabled | 3 (1 row) @@ -1028,8 +1038,8 @@ DROP TABLE lineitem_alter; -- during the unsuccessful COPY \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; - relname ---------------------------------------------------------------------- + relname +----------------------- lineitem_alter_220002 (1 row) @@ -1038,9 +1048,9 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; BEGIN; CREATE TABLE test_table_1(id int); SELECT create_distributed_table('test_table_1','id'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) ALTER TABLE test_table_1 ADD CONSTRAINT u_key UNIQUE(id); @@ -1049,8 +1059,8 @@ END; -- There should be no test_table_1 shard on workers \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%'; - relname ---------------------------------------------------------------------- + relname +--------- (0 rows) \c - - - :master_port @@ -1058,15 +1068,15 @@ SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%'; CREATE TABLE logged_test(id int); ALTER TABLE logged_test SET UNLOGGED; SELECT create_distributed_table('logged_test', 'id'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) \c - - - :worker_1_port SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; - relname | logged_info ---------------------------------------------------------------------- + relname | logged_info +--------------------+------------- logged_test_220022 | unlogged logged_test_220023 | unlogged logged_test_220024 | unlogged @@ -1077,15 +1087,15 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg -- verify SET LOGGED/UNLOGGED works after distributing the table ALTER TABLE logged_test SET LOGGED; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; - relname | logged_info ---------------------------------------------------------------------- + relname | logged_info +-------------+------------- logged_test | logged (1 row) \c - - - :worker_1_port SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; - relname | logged_info ---------------------------------------------------------------------- + relname | logged_info +--------------------+------------- logged_test_220022 | logged logged_test_220023 | logged logged_test_220024 | logged @@ -1095,15 +1105,15 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 
'unlogged' WHEN 'p' then 'logg \c - - - :master_port ALTER TABLE logged_test SET UNLOGGED; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; - relname | logged_info ---------------------------------------------------------------------- + relname | logged_info +-------------+------------- logged_test | unlogged (1 row) \c - - - :worker_1_port SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; - relname | logged_info ---------------------------------------------------------------------- + relname | logged_info +--------------------+------------- logged_test_220022 | unlogged logged_test_220023 | unlogged logged_test_220024 | unlogged @@ -1115,22 +1125,22 @@ DROP TABLE logged_test; -- Test WITH options on a normal simple hash-distributed table CREATE TABLE hash_dist(id bigint primary key, f1 text) WITH (fillfactor=40); SELECT create_distributed_table('hash_dist','id'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- verify that the storage options made it to the table definitions SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist'; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +-----------+----------------- hash_dist | {fillfactor=40} (1 row) \c - - - :worker_1_port SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +------------------+----------------- hash_dist_220026 | {fillfactor=40} hash_dist_220027 | {fillfactor=40} hash_dist_220028 | {fillfactor=40} @@ -1141,15 +1151,15 @@ SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'h -- verify that we can set and reset index storage parameters ALTER INDEX hash_dist_pkey SET(fillfactor=40); SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +----------------+----------------- hash_dist_pkey | {fillfactor=40} (1 row) \c - - - :worker_1_port SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +-----------------------+----------------- hash_dist_pkey_220026 | {fillfactor=40} hash_dist_pkey_220027 | {fillfactor=40} hash_dist_pkey_220028 | {fillfactor=40} @@ -1159,19 +1169,19 @@ SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' OR \c - - - :master_port ALTER INDEX hash_dist_pkey RESET(fillfactor); SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; - relname | reloptions ---------------------------------------------------------------------- - hash_dist_pkey | + relname | reloptions +----------------+------------ + hash_dist_pkey | (1 row) \c - - - :worker_1_port SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; - relname | reloptions 
---------------------------------------------------------------------- - hash_dist_pkey_220026 | - hash_dist_pkey_220027 | - hash_dist_pkey_220028 | - hash_dist_pkey_220029 | + relname | reloptions +-----------------------+------------ + hash_dist_pkey_220026 | + hash_dist_pkey_220027 | + hash_dist_pkey_220028 | + hash_dist_pkey_220029 | (4 rows) \c - - - :master_port @@ -1183,15 +1193,15 @@ DETAIL: Only RENAME TO, SET (), RESET (), ATTACH PARTITION and SET STATISTICS a CREATE UNIQUE INDEX another_index ON hash_dist(id) WITH (fillfactor=50); -- show the index and its storage options on coordinator, then workers SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index'; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +---------------+----------------- another_index | {fillfactor=50} (1 row) \c - - - :worker_1_port SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname; - relname | reloptions ---------------------------------------------------------------------- + relname | reloptions +----------------------+----------------- another_index_220026 | {fillfactor=50} another_index_220027 | {fillfactor=50} another_index_220028 | {fillfactor=50} @@ -1207,9 +1217,9 @@ DROP INDEX another_index; SET citus.shard_replication_factor TO 1; CREATE TABLE test_table_1(id int); SELECT create_distributed_table('test_table_1', 'id'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) ALTER TABLE test_table_1 ADD COLUMN test_col int UNIQUE; @@ -1224,9 +1234,9 @@ DETAIL: Adding a column with a constraint in one command is not supported becau HINT: You can issue each command separately such as ALTER TABLE test_table_1 ADD COLUMN test_col data_type; ALTER TABLE test_table_1 ADD CONSTRAINT constraint_name CHECK (check_expression); CREATE TABLE reference_table(i int UNIQUE); SELECT create_reference_table('reference_table'); - create_reference_table ---------------------------------------------------------------------- - + create_reference_table +------------------------ + (1 row) ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES reference_table(i) ON DELETE CASCADE; @@ -1240,9 +1250,9 @@ HINT: You can issue each command separately such as ALTER TABLE test_table_1 AD DROP TABLE reference_table; CREATE TABLE referenced_table(i int UNIQUE); SELECT create_distributed_table('referenced_table', 'i'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) ALTER TABLE test_table_1 ADD COLUMN test_col int REFERENCES referenced_table(i); diff --git a/src/test/regress/output/multi_append_table_to_shard.source b/src/test/regress/output/multi_append_table_to_shard.source index d2a72a960..3bf4f8899 100644 --- a/src/test/regress/output/multi_append_table_to_shard.source +++ b/src/test/regress/output/multi_append_table_to_shard.source @@ -25,6 +25,8 @@ SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number (1 row) +SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid1 \gset +SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid2 \gset CREATE TABLE multi_append_table_to_shard_right_reference_hash ( right_number INTEGER not null, @@ -45,8 +47,8 @@ SELECT set_config('citus.shard_replication_factor', '2', 
false); 2 (1 row) -\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' +copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid1); +copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid2); -- Place 'right' table on both workers \copy multi_append_table_to_shard_right_reference FROM '@abs_srcdir@/data/agg.data' -- Reset shard replication factor to ensure tasks will be assigned to both workers @@ -81,7 +83,7 @@ SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage' FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right_reference_hash'::regclass::oid = logicalrelid; -ERROR: cannot append to shardId 230001 +ERROR: cannot append to shardId 230003 DETAIL: We currently don't support appending to shards in hash-partitioned, reference and local tables -- Clean up after test DROP TABLE multi_append_table_to_shard_stage; diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index a353321e3..1d2e3107e 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -14,9 +14,9 @@ CREATE TABLE customer_copy_hash ( c_comment varchar(117), primary key (c_custkey)); SELECT master_create_distributed_table('customer_copy_hash', 'c_custkey', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - + master_create_distributed_table +--------------------------------- + (1 row) -- Test COPY into empty hash-partitioned table @@ -25,21 +25,24 @@ ERROR: could not find any shards into which to copy DETAIL: No shards exist for distributed table "customer_copy_hash". HINT: Run master_create_worker_shards to create shards and try again. SELECT master_create_worker_shards('customer_copy_hash', 64, 1); - master_create_worker_shards ---------------------------------------------------------------------- - + master_create_worker_shards +----------------------------- + (1 row) -- Test empty copy COPY customer_copy_hash FROM STDIN; -- Test syntax error COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; -ERROR: invalid input syntax for integer: "1,customer1" +ERROR: invalid input syntax for type integer: "1,customer1" CONTEXT: COPY customer_copy_hash, line 1, column c_custkey: "1,customer1" +-- Test invalid option +COPY customer_copy_hash (c_custkey,c_name) FROM STDIN (append_to_shard 1); +ERROR: append_to_shard is only valid for append-distributed tables -- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; - count ---------------------------------------------------------------------- + count +------- 0 (1 row) @@ -50,8 +53,8 @@ ERROR: duplicate key value violates unique constraint "customer_copy_hash_pkey_ DETAIL: Key (c_custkey)=(2) already exists. 
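For context on the hunks above: with the implicit shard-creation path removed, loading into an append-distributed table now means allocating a shard explicitly and naming it in the COPY options. A minimal sketch of that workflow, assuming an illustrative table name and data file rather than anything taken from this patch:

    SET citus.shard_replication_factor TO 1;
    CREATE TABLE events_append (id bigint, payload text);
    SELECT create_distributed_table('events_append', 'id', 'append');
    -- COPY no longer creates append shards implicitly; allocate one up front
    SELECT master_create_empty_shard('events_append') AS shardid \gset
    -- route every copied row into that specific shard
    COPY events_append FROM '/tmp/events.csv' WITH (FORMAT 'csv', append_to_shard :shardid);

As the new expected error above shows, passing append_to_shard for a table that is not append-distributed is rejected outright.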
-- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; - count ---------------------------------------------------------------------- + count +------- 0 (1 row) @@ -60,8 +63,8 @@ COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', HEADER true, FORCE_NULL (c_custkey)); -- Confirm that only first row was skipped SELECT count(*) FROM customer_copy_hash; - count ---------------------------------------------------------------------- + count +------- 3 (1 row) @@ -70,8 +73,8 @@ COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NOT_NULL (c_address)); -- Confirm that value is not null SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 4; - count ---------------------------------------------------------------------- + count +------- 1 (1 row) @@ -80,20 +83,20 @@ COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NULL (c_address)); -- Confirm that value is null SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 5; - count ---------------------------------------------------------------------- + count +------- 0 (1 row) -- Test null violation COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -ERROR: null value in column "c_name" violates not-null constraint +ERROR: null value in column "c_name" of relation "customer_copy_hash_560001" violates not-null constraint DETAIL: Failing row contains (8, null, null, null, null, null, null, null). -- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; - count ---------------------------------------------------------------------- + count +------- 5 (1 row) @@ -102,8 +105,8 @@ COPY customer_copy_hash (c_custkey, c_name) FROM PROGRAM 'echo 9 customer9' WITH (DELIMITER ' '); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash WHERE c_custkey = 9; - count ---------------------------------------------------------------------- + count +------- 1 (1 row) @@ -111,8 +114,8 @@ SELECT count(*) FROM customer_copy_hash WHERE c_custkey = 9; COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.2.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; - count ---------------------------------------------------------------------- + count +------- 1006 (1 row) @@ -120,28 +123,28 @@ SELECT count(*) FROM customer_copy_hash; \copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; - count ---------------------------------------------------------------------- + count +------- 2006 (1 row) -- Make sure that master_update_shard_statistics() only updates shard length for -- hash-partitioned tables SELECT master_update_shard_statistics(560000); - master_update_shard_statistics ---------------------------------------------------------------------- + master_update_shard_statistics +-------------------------------- 8192 (1 row) SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560000; - shardid | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- + shardid | shardminvalue | shardmaxvalue +---------+---------------+--------------- 560000 | -2147483648 | -2080374785 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560000; - shardid | shardlength 
---------------------------------------------------------------------- + shardid | shardlength +---------+------------- 560000 | 8192 (1 row) @@ -151,15 +154,15 @@ CREATE TABLE customer_with_default( c_name varchar(25) not null, c_time timestamp default now()); SELECT master_create_distributed_table('customer_with_default', 'c_custkey', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - + master_create_distributed_table +--------------------------------- + (1 row) SELECT master_create_worker_shards('customer_with_default', 64, 1); - master_create_worker_shards ---------------------------------------------------------------------- - + master_create_worker_shards +----------------------------- + (1 row) -- Test with default values for now() function @@ -167,8 +170,8 @@ COPY customer_with_default (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -- Confirm that data was copied with now() function SELECT count(*) FROM customer_with_default where c_time IS NOT NULL; - count ---------------------------------------------------------------------- + count +------- 2 (1 row) @@ -177,8 +180,8 @@ ALTER TABLE customer_copy_hash ADD COLUMN extra1 INT DEFAULT 0; ALTER TABLE customer_copy_hash ADD COLUMN extra2 INT DEFAULT 0; COPY customer_copy_hash (c_custkey, c_name, extra1, extra2) FROM STDIN CSV; SELECT * FROM customer_copy_hash WHERE extra1 = 1; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | extra1 | extra2 ---------------------------------------------------------------------- + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | extra1 | extra2 +-----------+------------+-----------+-------------+---------+-----------+--------------+-----------+--------+-------- 10 | customer10 | | | | | | | 1 | 5 (1 row) @@ -186,8 +189,8 @@ SELECT * FROM customer_copy_hash WHERE extra1 = 1; ALTER TABLE customer_copy_hash DROP COLUMN extra1; COPY customer_copy_hash (c_custkey, c_name, extra2) FROM STDIN CSV; SELECT * FROM customer_copy_hash WHERE c_custkey = 11; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | extra2 ---------------------------------------------------------------------- + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | extra2 +-----------+------------+-----------+-------------+---------+-----------+--------------+-----------+-------- 11 | customer11 | | | | | | | 5 (1 row) @@ -195,9 +198,9 @@ SELECT * FROM customer_copy_hash WHERE c_custkey = 11; ALTER TABLE customer_copy_hash DROP COLUMN extra2; COPY customer_copy_hash (c_custkey, c_name) FROM STDIN CSV; SELECT * FROM customer_copy_hash WHERE c_custkey = 12; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment ---------------------------------------------------------------------- - 12 | customer12 | | | | | | + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment +-----------+------------+-----------+-------------+---------+-----------+--------------+----------- + 12 | customer12 | | | | | | (1 row) -- Create a new range-partitioned table into which to COPY @@ -212,9 +215,9 @@ CREATE TABLE customer_copy_range ( c_comment varchar(117), primary key (c_custkey)); SELECT master_create_distributed_table('customer_copy_range', 'c_custkey', 'range'); - master_create_distributed_table 
---------------------------------------------------------------------- - + master_create_distributed_table +--------------------------------- + (1 row) -- Test COPY into empty range-partitioned table @@ -234,15 +237,15 @@ COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITE -- Check whether data went into the right shard (maybe) SELECT min(c_custkey), max(c_custkey), avg(c_custkey), count(*) FROM customer_copy_range WHERE c_custkey <= 500; - min | max | avg | count ---------------------------------------------------------------------- + min | max | avg | count +-----+-----+----------------------+------- 1 | 500 | 250.5000000000000000 | 500 (1 row) -- Check whether data was copied SELECT count(*) FROM customer_copy_range; - count ---------------------------------------------------------------------- + count +------- 1000 (1 row) @@ -250,14 +253,14 @@ SELECT count(*) FROM customer_copy_range; UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = :new_shard_id; SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; - shardid | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- + shardid | shardminvalue | shardmaxvalue +---------+---------------+--------------- 560129 | 1501 | 2000 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id; - shardid | shardlength ---------------------------------------------------------------------- + shardid | shardlength +---------+------------- 560129 | 0 560129 | 0 (2 rows) @@ -265,20 +268,20 @@ SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_sh -- Update shard statistics for range-partitioned shard and check that only the -- shard length is updated. 
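Before the verification queries that follow, the behavior being pinned down: for a range-partitioned shard, master_update_shard_statistics() refreshes only the shard length and leaves the manually set shardminvalue/shardmaxvalue alone, whereas for an append shard (see the 560132 hunk further down) it also recomputes min/max from the data. A condensed sketch of the append-side check, with :append_shard standing in for a real shard id:

    -- make the shard metadata deliberately stale
    UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = :append_shard;
    SELECT master_update_shard_statistics(:append_shard);
    -- for append shards, min/max once again reflect the actual data range
    SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :append_shard;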
SELECT master_update_shard_statistics(:new_shard_id); - master_update_shard_statistics ---------------------------------------------------------------------- + master_update_shard_statistics +-------------------------------- 131072 (1 row) SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; - shardid | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- + shardid | shardminvalue | shardmaxvalue +---------+---------------+--------------- 560129 | 1501 | 2000 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id; - shardid | shardlength ---------------------------------------------------------------------- + shardid | shardlength +---------+------------- 560129 | 131072 560129 | 131072 (2 rows) @@ -296,76 +299,85 @@ CREATE TABLE customer_copy_append ( c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117)); -SELECT master_create_distributed_table('customer_copy_append', 'c_custkey', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - +SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append'); + create_distributed_table +-------------------------- + (1 row) -- Test syntax error -COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -ERROR: invalid input syntax for integer: "notinteger" +BEGIN; +SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset +COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); +ERROR: invalid input syntax for type integer: "notinteger" CONTEXT: COPY customer_copy_append, line 3, column c_custkey: "notinteger" +END; -- Test that no shard is created for failing copy SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; - count ---------------------------------------------------------------------- + count +------- 0 (1 row) -- Test empty copy -COPY customer_copy_append FROM STDIN; --- Test that no shard is created for copying zero rows +BEGIN; +SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset +COPY customer_copy_append FROM STDIN WITH (append_to_shard :shardid); +END; +-- Test that a shard is created SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; - count ---------------------------------------------------------------------- - 0 + count +------- + 1 (1 row) -- Test proper copy +BEGIN; +SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset +COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); +END; -- Check whether data was copied properly SELECT * FROM customer_copy_append; - c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment ---------------------------------------------------------------------- - 1 | customer1 | | | | | | - 2 | customer2 | | | | | | + c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment +-----------+-----------+-----------+-------------+---------+-----------+--------------+----------- + 1 | customer1 | | | | | | + 2 | customer2 | | | | | | (2 rows) -- Manipulate and check shard statistics for append-partitioned table shard -UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000
WHERE shardid = 560131; -UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560131; -SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; - shardid | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- - 560131 | 1501 | 2000 +UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560132; +UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560132; +SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132; + shardid | shardminvalue | shardmaxvalue +---------+---------------+--------------- + 560132 | 1501 | 2000 (1 row) -SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; - shardid | shardlength ---------------------------------------------------------------------- - 560131 | 0 - 560131 | 0 +SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132; + shardid | shardlength +---------+------------- + 560132 | 0 + 560132 | 0 (2 rows) -- Update shard statistics for append-partitioned shard -SELECT master_update_shard_statistics(560131); - master_update_shard_statistics ---------------------------------------------------------------------- +SELECT master_update_shard_statistics(560132); + master_update_shard_statistics +-------------------------------- 8192 (1 row) -SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; - shardid | shardminvalue | shardmaxvalue ---------------------------------------------------------------------- - 560131 | 1 | 2 +SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132; + shardid | shardminvalue | shardmaxvalue +---------+---------------+--------------- + 560132 | 1 | 2 (1 row) -SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; - shardid | shardlength ---------------------------------------------------------------------- - 560131 | 8192 - 560131 | 8192 +SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132; + shardid | shardlength +---------+------------- + 560132 | 8192 + 560132 | 8192 (2 rows) -- Create lineitem table @@ -386,46 +398,27 @@ CREATE TABLE lineitem_copy_append ( l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null); -SELECT master_create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - +SELECT create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append'); + create_distributed_table +-------------------------- + (1 row) --- Test multiple shard creation -SET citus.shard_max_size TO '256kB'; -COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'; +BEGIN; +SELECT master_create_empty_shard('lineitem_copy_append') AS shardid \gset +COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid); +END; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass; - count ---------------------------------------------------------------------- - 5 + count +------- + 1 (1 row) --- Test round robin shard policy -SET citus.shard_replication_factor TO 1; -COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'; -SELECT - pg_dist_shard_placement.shardid, - pg_dist_shard_placement.nodeport -FROM - 
pg_dist_shard, - pg_dist_shard_placement -WHERE - pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND - logicalrelid = 'lineitem_copy_append'::regclass -ORDER BY - pg_dist_shard.shardid DESC -LIMIT - 5; - shardid | nodeport ---------------------------------------------------------------------- - 560141 | 57637 - 560140 | 57638 - 560139 | 57637 - 560138 | 57638 - 560137 | 57637 -(5 rows) - +-- trigger some errors on the append_to_shard option +COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 1); +ERROR: could not find valid entry for shard 1 +COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 560000); +ERROR: shard 560000 does not belong to table lineitem_copy_append -- Test schema support on append partitioned tables CREATE SCHEMA append; CREATE TABLE append.customer_copy ( @@ -437,19 +430,21 @@ CREATE TABLE append.customer_copy ( c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117)); -SELECT master_create_distributed_table('append.customer_copy', 'c_custkey', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - +SELECT create_distributed_table('append.customer_copy', 'c_custkey', 'append'); + create_distributed_table +-------------------------- + (1 row) +SELECT master_create_empty_shard('append.customer_copy') AS shardid1 \gset +SELECT master_create_empty_shard('append.customer_copy') AS shardid2 \gset -- Test copy from the master node -COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|'); -COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|'); +COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard :shardid1); +COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid2); -- Test the content of the table SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy; - min | max | avg | count ---------------------------------------------------------------------- + min | max | avg | count +-----+------+-----------------------+------- 1 | 7000 | 4443.8028800000000000 | 2000 (1 row) @@ -458,23 +453,23 @@ CREATE TABLE "customer_with_special_\\_character"( c_custkey integer, c_name varchar(25) not null); SELECT master_create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - + master_create_distributed_table +--------------------------------- + (1 row) SELECT master_create_worker_shards('"customer_with_special_\\_character"', 4, 1); - master_create_worker_shards ---------------------------------------------------------------------- - + master_create_worker_shards +----------------------------- + (1 row) COPY "customer_with_special_\\_character" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -- Confirm that data was copied SELECT count(*) FROM "customer_with_special_\\_character"; - count ---------------------------------------------------------------------- + count +------- 2 (1 row) @@ -483,23 +478,23 @@ CREATE TABLE "1_customer"( c_custkey integer, c_name varchar(25) not null); SELECT master_create_distributed_table('"1_customer"', 'c_custkey', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - + 
master_create_distributed_table +--------------------------------- + (1 row) SELECT master_create_worker_shards('"1_customer"', 4, 1); - master_create_worker_shards ---------------------------------------------------------------------- - + master_create_worker_shards +----------------------------- + (1 row) COPY "1_customer" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -- Confirm that data was copied SELECT count(*) FROM "1_customer"; - count ---------------------------------------------------------------------- + count +------- 2 (1 row) @@ -518,23 +513,23 @@ CREATE TABLE packed_numbers_hash ( packed_numbers number_pack[] ); SELECT master_create_distributed_table('packed_numbers_hash', 'id', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - + master_create_distributed_table +--------------------------------- + (1 row) SELECT master_create_worker_shards('packed_numbers_hash', 4, 1); - master_create_worker_shards ---------------------------------------------------------------------- - + master_create_worker_shards +----------------------------- + (1 row) COPY (SELECT 1, ARRAY[ROW(42, 42), ROW(42, 42)]) TO :'temp_dir''copy_test_array_of_composite'; COPY packed_numbers_hash FROM :'temp_dir''copy_test_array_of_composite'; -- Verify data is actually copied SELECT * FROM packed_numbers_hash; - id | packed_numbers ---------------------------------------------------------------------- + id | packed_numbers +----+----------------------- 1 | {"(42,42)","(42,42)"} (1 row) @@ -544,23 +539,23 @@ CREATE TABLE super_packed_numbers_hash ( super_packed_number super_number_pack ); SELECT master_create_distributed_table('super_packed_numbers_hash', 'id', 'hash'); - master_create_distributed_table ---------------------------------------------------------------------- - + master_create_distributed_table +--------------------------------- + (1 row) SELECT master_create_worker_shards('super_packed_numbers_hash', 4, 1); - master_create_worker_shards ---------------------------------------------------------------------- - + master_create_worker_shards +----------------------------- + (1 row) COPY (SELECT 1, ROW(ROW(42, 42), ROW(42, 42))) TO :'temp_dir''copy_test_composite_of_composite'; COPY super_packed_numbers_hash FROM :'temp_dir''copy_test_composite_of_composite'; -- Verify data is actually copied SELECT * FROM super_packed_numbers_hash; - id | super_packed_number ---------------------------------------------------------------------- + id | super_packed_number +----+----------------------- 1 | ("(42,42)","(42,42)") (1 row) @@ -569,17 +564,18 @@ CREATE TABLE packed_numbers_append ( id integer, packed_numbers number_pack[] ); -SELECT master_create_distributed_table('packed_numbers_append', 'id', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - +SELECT create_distributed_table('packed_numbers_append', 'id', 'append'); + create_distributed_table +-------------------------- + (1 row) -COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite'; +SELECT master_create_empty_shard('packed_numbers_append') AS shardid \gset +COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite' WITH (append_to_shard :shardid); -- Verify data is actually copied SELECT * FROM packed_numbers_append; - id | packed_numbers ---------------------------------------------------------------------- + id | packed_numbers +----+----------------------- 1 | {"(42,42)","(42,42)"} 
(1 row) @@ -588,17 +584,18 @@ CREATE TABLE super_packed_numbers_append ( id integer, super_packed_number super_number_pack ); -SELECT master_create_distributed_table('super_packed_numbers_append', 'id', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - +SELECT create_distributed_table('super_packed_numbers_append', 'id', 'append'); + create_distributed_table +-------------------------- + (1 row) -COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite'; +SELECT master_create_empty_shard('super_packed_numbers_append') AS shardid \gset +COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite' WITH (append_to_shard :shardid); -- Verify data is actually copied SELECT * FROM super_packed_numbers_append; - id | super_packed_number ---------------------------------------------------------------------- + id | super_packed_number +----+----------------------- 1 | ("(42,42)","(42,42)") (1 row) @@ -607,99 +604,106 @@ CREATE TABLE composite_partition_column_table( id integer, composite_column number_pack ); -SELECT master_create_distributed_table('composite_partition_column_table', 'composite_column', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - +SELECT create_distributed_table('composite_partition_column_table', 'composite_column', 'append'); + create_distributed_table +-------------------------- + (1 row) -\COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv'); -WARNING: function min(number_pack) does not exist -HINT: No function matches the given name and argument types. You might need to add explicit type casts. -CONTEXT: while executing command on localhost:xxxxx -WARNING: could not get statistics for shard public.composite_partition_column_table_560162 -DETAIL: Setting shard statistics to NULL -ERROR: failure on connection marked as essential: localhost:xxxxx +SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset +COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid); -- Test that copy on append-distributed tables does not create shards on removed workers CREATE TABLE numbers_append (a int, b int); -SELECT master_create_distributed_table('numbers_append', 'a', 'append'); - master_create_distributed_table ---------------------------------------------------------------------- - +SELECT create_distributed_table('numbers_append', 'a', 'append'); + create_distributed_table +-------------------------- + (1 row) -- no shards are created yet SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; - shardid | nodename | nodeport ---------------------------------------------------------------------- + shardid | nodename | nodeport +---------+----------+---------- (0 rows) -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); +SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset +SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1); +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2); -- verify there are shards at both workers SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE
logicalrelid = 'numbers_append'::regclass order by placementid; - shardid | nodename | nodeport ---------------------------------------------------------------------- - 560163 | localhost | 57637 - 560164 | localhost | 57638 -(2 rows) + shardid | nodename | nodeport +---------+-----------+---------- + 560155 | localhost | 57637 + 560155 | localhost | 57638 + 560156 | localhost | 57638 + 560156 | localhost | 57637 +(4 rows) -- disable the first node SELECT master_disable_node('localhost', :worker_1_port); -NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back. - master_disable_node ---------------------------------------------------------------------- - +NOTICE: Node localhost:57637 has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back. + master_disable_node +--------------------- + (1 row) -- set replication factor to 1 so that copy will -- succeed without replication count error SET citus.shard_replication_factor TO 1; -- add two new shards and verify they are created at the other node -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); +SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset +SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1); +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2); SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; - shardid | nodename | nodeport ---------------------------------------------------------------------- - 560163 | localhost | 57637 - 560164 | localhost | 57638 - 560165 | localhost | 57638 - 560166 | localhost | 57638 -(4 rows) + shardid | nodename | nodeport +---------+-----------+---------- + 560155 | localhost | 57637 + 560155 | localhost | 57638 + 560156 | localhost | 57638 + 560156 | localhost | 57637 + 560157 | localhost | 57638 + 560158 | localhost | 57638 +(6 rows) -- add the node back SET client_min_messages TO ERROR; SELECT 1 FROM master_activate_node('localhost', :worker_1_port); - ?column? ---------------------------------------------------------------------- + ?column? 
+---------- 1 (1 row) RESET client_min_messages; RESET citus.shard_replication_factor; -- add two new shards and verify they are created at both workers -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); -COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); +SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset +SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1); +COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2); SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; - shardid | nodename | nodeport ---------------------------------------------------------------------- - 560163 | localhost | 57637 - 560164 | localhost | 57638 - 560165 | localhost | 57638 - 560166 | localhost | 57638 - 560167 | localhost | 57637 - 560167 | localhost | 57638 - 560168 | localhost | 57638 - 560168 | localhost | 57637 -(8 rows) + shardid | nodename | nodeport +---------+-----------+---------- + 560155 | localhost | 57637 + 560155 | localhost | 57638 + 560156 | localhost | 57638 + 560156 | localhost | 57637 + 560157 | localhost | 57638 + 560158 | localhost | 57638 + 560159 | localhost | 57637 + 560159 | localhost | 57638 + 560160 | localhost | 57638 + 560160 | localhost | 57637 +(10 rows) DROP TABLE numbers_append; -- Test copy failures against connection failures @@ -708,8 +712,8 @@ CREATE USER test_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SELECT * FROM run_command_on_workers('CREATE USER test_user'); - nodename | nodeport | success | result ---------------------------------------------------------------------- + nodename | nodeport | success | result +-----------+----------+---------+------------- localhost | 57637 | t | CREATE ROLE localhost | 57638 | t | CREATE ROLE (2 rows) @@ -718,9 +722,9 @@ SELECT * FROM run_command_on_workers('CREATE USER test_user'); SET citus.shard_count to 4; CREATE TABLE numbers_hash (a int, b int); SELECT create_distributed_table('numbers_hash', 'a'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); @@ -728,48 +732,48 @@ COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by shardid, nodeport; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- - 560169 | 1 | localhost | 57637 - 560169 | 1 | localhost | 57638 - 560170 | 1 | localhost | 57637 - 560170 | 1 | localhost | 57638 - 560171 | 1 | localhost | 57637 - 560171 | 1 | localhost | 57638 - 560172 | 1 | localhost | 57637 - 560172 | 1 | localhost | 57638 + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- + 560161 | 1 | localhost | 57637 + 560161 | 1 | localhost | 57638 + 560162 | 1 | localhost | 57637 + 560162 | 1 | localhost | 57638 + 560163 | 1 | localhost | 57637 + 560163 | 1 | localhost | 57638 + 560164 | 1 | localhost | 57637 + 560164 | 1 | localhost | 57638 (8 rows) -- create a reference table CREATE TABLE numbers_reference(a int, b 
int); SELECT create_reference_table('numbers_reference'); - create_reference_table ---------------------------------------------------------------------- - + create_reference_table +------------------------ + (1 row) COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); -- create another hash distributed table CREATE TABLE numbers_hash_other(a int, b int); SELECT create_distributed_table('numbers_hash_other', 'a'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash_other'::regclass order by shardid, nodeport; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- - 560174 | 1 | localhost | 57637 - 560174 | 1 | localhost | 57638 - 560175 | 1 | localhost | 57637 - 560175 | 1 | localhost | 57638 - 560176 | 1 | localhost | 57637 - 560176 | 1 | localhost | 57638 - 560177 | 1 | localhost | 57637 - 560177 | 1 | localhost | 57638 + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- + 560166 | 1 | localhost | 57637 + 560166 | 1 | localhost | 57638 + 560167 | 1 | localhost | 57637 + 560167 | 1 | localhost | 57638 + 560168 | 1 | localhost | 57637 + 560168 | 1 | localhost | 57638 + 560169 | 1 | localhost | 57637 + 560169 | 1 | localhost | 57638 (8 rows) -- manually corrupt pg_dist_shard such that both copies of one shard are placed in @@ -786,67 +790,64 @@ ALTER USER test_user WITH nologin; \c - test_user - :master_port -- reissue copy, and it should fail COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node localhost:57637 failed with the following error: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash, line 1: "1,1" -- verify that shards in none of the workers are marked invalid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by shardid, nodeport; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- - 560169 | 1 | localhost | 57637 - 560169 | 1 | localhost | 57638 - 560170 | 1 | localhost | 57637 - 560170 | 1 | localhost | 57638 - 560171 | 1 | localhost | 57637 - 560171 | 1 | localhost | 57638 - 560172 | 1 | localhost | 57637 - 560172 | 1 | localhost | 57638 + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- + 560161 | 1 | localhost | 57637 + 560161 | 1 | localhost | 57638 + 560162 | 1 | localhost | 57637 + 560162 | 1 | localhost | 57638 + 560163 | 1 | localhost | 57637 + 560163 | 1 | localhost | 57638 + 560164 | 1 | localhost | 57637 + 560164 | 1 | localhost | 57638 (8 rows) -- try to insert into a reference table; copy should fail COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node localhost:57637 failed with the following error: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_reference, line 1: "3,1" -- verify shards for reference
table are still valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_reference'::regclass order by placementid; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- - 560173 | 1 | localhost | 57637 - 560173 | 1 | localhost | 57638 + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- + 560165 | 1 | localhost | 57637 + 560165 | 1 | localhost | 57638 (2 rows) -- try to insert into numbers_hash_other. copy should fail and roll back -- since it cannot insert into either copy of a shard. shards are expected to -- stay valid since the operation is rolled back. COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node localhost:57637 failed with the following error: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash_other, line 1: "1,1" -- verify shards for numbers_hash_other are still valid -- since copy has failed altogether SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash_other'::regclass order by shardid, nodeport; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- - 560174 | 1 | localhost | 57637 - 560174 | 1 | localhost | 57638 - 560175 | 1 | localhost | 57637 - 560175 | 1 | localhost | 57638 - 560176 | 1 | localhost | 57637 - 560176 | 1 | localhost | 57638 - 560177 | 1 | localhost | 57637 - 560177 | 1 | localhost | 57638 + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- + 560166 | 1 | localhost | 57637 + 560166 | 1 | localhost | 57638 + 560167 | 1 | localhost | 57637 + 560167 | 1 | localhost | 57638 + 560168 | 1 | localhost | 57637 + 560168 | 1 | localhost | 57638 + 560169 | 1 | localhost | 57637 + 560169 | 1 | localhost | 57638 (8 rows) -- re-enable test_user on the first worker \c - :default_user - :worker_1_port ALTER USER test_user WITH login; --- there is a dangling shard in worker_2, drop it -\c - test_user - :worker_2_port -DROP TABLE numbers_hash_other_560176; \c - test_user - :master_port DROP TABLE numbers_hash; DROP TABLE numbers_hash_other; @@ -857,23 +858,23 @@ DROP TABLE numbers_reference; SET citus.shard_count to 4; CREATE TABLE numbers_hash(a int, b int); SELECT create_distributed_table('numbers_hash', 'a'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) \c - - - :worker_1_port -ALTER TABLE numbers_hash_560180 DROP COLUMN b; +ALTER TABLE numbers_hash_560170 DROP COLUMN b; \c - - - :master_port -- operation will fail to modify a shard and roll back COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -ERROR: column "b" of relation "numbers_hash_560180" does not exist -CONTEXT: while executing command on localhost:xxxxx -COPY numbers_hash, line 6: "6,6" +ERROR: column "b" of relation "numbers_hash_560170" does not exist +CONTEXT: while executing command on localhost:57637 +COPY numbers_hash, line 1: "1,1" -- verify no row is inserted SELECT count(a) FROM numbers_hash; - count ---------------------------------------------------------------------- + count +------- 0 (1 row) @@
-881,22 +882,22 @@ SELECT count(a) FROM numbers_hash; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by shardid, nodeport; - shardid | shardstate | nodename | nodeport ---------------------------------------------------------------------- - 560178 | 1 | localhost | 57637 - 560178 | 1 | localhost | 57638 - 560179 | 1 | localhost | 57637 - 560179 | 1 | localhost | 57638 - 560180 | 1 | localhost | 57637 - 560180 | 1 | localhost | 57638 - 560181 | 1 | localhost | 57637 - 560181 | 1 | localhost | 57638 + shardid | shardstate | nodename | nodeport +---------+------------+-----------+---------- + 560170 | 1 | localhost | 57637 + 560170 | 1 | localhost | 57638 + 560171 | 1 | localhost | 57637 + 560171 | 1 | localhost | 57638 + 560172 | 1 | localhost | 57637 + 560172 | 1 | localhost | 57638 + 560173 | 1 | localhost | 57637 + 560173 | 1 | localhost | 57638 (8 rows) DROP TABLE numbers_hash; SELECT * FROM run_command_on_workers('DROP USER test_user'); - nodename | nodeport | success | result ---------------------------------------------------------------------- + nodename | nodeport | success | result +-----------+----------+---------+----------- localhost | 57637 | t | DROP ROLE localhost | 57638 | t | DROP ROLE (2 rows) @@ -908,15 +909,15 @@ col1 aclitem NOT NULL, col2 character varying(255) NOT NULL ); SELECT create_reference_table('test_binaryless_builtin'); - create_reference_table ---------------------------------------------------------------------- - + create_reference_table +------------------------ + (1 row) \COPY test_binaryless_builtin FROM STDIN WITH (format CSV) SELECT * FROM test_binaryless_builtin; - col1 | col2 ---------------------------------------------------------------------- + col1 | col2 +---------------------+------- postgres=r/postgres | test (1 row) @@ -925,9 +926,9 @@ DROP TABLE test_binaryless_builtin; BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) \copy tt1 from STDIN; @@ -936,24 +937,24 @@ END; -- Test dropping a column in front of the partition column CREATE TABLE drop_copy_test_table (col1 int, col2 int, col3 int, col4 int); SELECT create_distributed_table('drop_copy_test_table','col3'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) ALTER TABLE drop_copy_test_table drop column col1; COPY drop_copy_test_table (col2,col3,col4) from STDIN with CSV; SELECT * FROM drop_copy_test_table WHERE col3 = 1; - col2 | col3 | col4 ---------------------------------------------------------------------- - | 1 | + col2 | col3 | col4 +------+------+------ + | 1 | (1 row) ALTER TABLE drop_copy_test_table drop column col4; COPY drop_copy_test_table (col2,col3) from STDIN with CSV; SELECT * FROM drop_copy_test_table WHERE col3 = 1; - col2 | col3 ---------------------------------------------------------------------- + col2 | col3 +------+------ | 1 | 1 (2 rows) @@ -962,8 +963,8 @@ DROP TABLE drop_copy_test_table; -- There should be no "tt1" shard on the worker nodes \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'tt1%'; - relname ---------------------------------------------------------------------- + relname +--------- (0 rows) \c - - - 
:master_port @@ -976,9 +977,9 @@ NOTICE: Copying data from local table... NOTICE: copying the data has completed DETAIL: The local data in the table is no longer visible, but is still on disk. HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$public.trigger_flush$$) - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) ABORT; @@ -988,9 +989,9 @@ SET citus.shard_count TO 3; SET citus.multi_shard_modify_mode TO 'sequential'; CREATE UNLOGGED TABLE trigger_switchover(a int, b int, c int, d int, e int, f int, g int, h int); SELECT create_distributed_table('trigger_switchover','a'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) INSERT INTO trigger_switchover @@ -999,16 +1000,16 @@ ABORT; -- copy into a table with a JSONB column CREATE TABLE copy_jsonb (key text, value jsonb, extra jsonb default '["default"]'::jsonb); SELECT create_distributed_table('copy_jsonb', 'key', colocate_with => 'none'); - create_distributed_table ---------------------------------------------------------------------- - + create_distributed_table +-------------------------- + (1 row) -- JSONB from text should work \COPY copy_jsonb (key, value) FROM STDIN SELECT * FROM copy_jsonb ORDER BY key; - key | value | extra ---------------------------------------------------------------------- + key | value | extra +-------+----------------------------+------------- blue | {"b": 255, "g": 0, "r": 0} | ["default"] green | {"b": 0, "g": 255, "r": 0} | ["default"] (2 rows) @@ -1017,8 +1018,8 @@ SELECT * FROM copy_jsonb ORDER BY key; COPY copy_jsonb TO :'temp_dir''copy_jsonb.pgcopy' WITH (format binary); COPY copy_jsonb FROM :'temp_dir''copy_jsonb.pgcopy' WITH (format binary); SELECT * FROM copy_jsonb ORDER BY key; - key | value | extra ---------------------------------------------------------------------- + key | value | extra +-------+----------------------------+------------- blue | {"b": 255, "g": 0, "r": 0} | ["default"] blue | {"b": 255, "g": 0, "r": 0} | ["default"] green | {"b": 0, "g": 255, "r": 0} | ["default"] @@ -1027,7 +1028,7 @@ SELECT * FROM copy_jsonb ORDER BY key; -- JSONB parsing error without validation: no line number \COPY copy_jsonb (key, value) FROM STDIN -ERROR: invalid input syntax for json +ERROR: invalid input syntax for type json DETAIL: The input string ended unexpectedly. 
CONTEXT: JSON data, line 1: {"r":255,"g":0,"b":0 COPY copy_jsonb, line 1, column value: "{"r":255,"g":0,"b":0" @@ -1036,8 +1037,8 @@ SET citus.skip_jsonb_validation_in_copy TO off; -- JSONB from text should work \COPY copy_jsonb (key, value) FROM STDIN SELECT * FROM copy_jsonb ORDER BY key; - key | value | extra ---------------------------------------------------------------------- + key | value | extra +-------+----------------------------+------------- blue | {"b": 255, "g": 0, "r": 0} | ["default"] green | {"b": 0, "g": 255, "r": 0} | ["default"] (2 rows) @@ -1046,8 +1047,8 @@ SELECT * FROM copy_jsonb ORDER BY key; COPY copy_jsonb TO :'temp_dir''copy_jsonb.pgcopy' WITH (format binary); COPY copy_jsonb FROM :'temp_dir''copy_jsonb.pgcopy' WITH (format binary); SELECT * FROM copy_jsonb ORDER BY key; - key | value | extra ---------------------------------------------------------------------- + key | value | extra +-------+----------------------------+------------- blue | {"b": 255, "g": 0, "r": 0} | ["default"] blue | {"b": 255, "g": 0, "r": 0} | ["default"] green | {"b": 0, "g": 255, "r": 0} | ["default"] @@ -1056,7 +1057,7 @@ SELECT * FROM copy_jsonb ORDER BY key; -- JSONB parsing error with validation: should see line number \COPY copy_jsonb (key, value) FROM STDIN -ERROR: invalid input syntax for json +ERROR: invalid input syntax for type json DETAIL: The input string ended unexpectedly. CONTEXT: JSON data, line 1: {"r":255,"g":0,"b":0 COPY copy_jsonb, line 1, column value: "{"r":255,"g":0,"b":0" diff --git a/src/test/regress/output/multi_create_schema.source b/src/test/regress/output/multi_create_schema.source deleted file mode 100644 index 4fedfe112..000000000 --- a/src/test/regress/output/multi_create_schema.source +++ /dev/null @@ -1,20 +0,0 @@ -SET citus.next_shard_id TO 250000; -CREATE SCHEMA tpch -CREATE TABLE nation ( - n_nationkey integer not null, - n_name char(25) not null, - n_regionkey integer not null, - n_comment varchar(152)); -SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append'); - create_distributed_table --------------------------- - -(1 row) - -\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' -SELECT count(*) from tpch.nation; - count -------- - 25 -(1 row) - diff --git a/src/test/regress/output/multi_load_data.source b/src/test/regress/output/multi_load_data.source index d8099d0f8..ae9ff3fdd 100644 --- a/src/test/regress/output/multi_load_data.source +++ b/src/test/regress/output/multi_load_data.source @@ -13,9 +13,9 @@ SET citus.next_shard_id TO 290000; \copy orders_reference FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' +\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006) \copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' -\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|' +\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009) \copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' diff --git a/src/test/regress/output/multi_load_large_records.source 
b/src/test/regress/output/multi_load_large_records.source deleted file mode 100644 index fa43cfae4..000000000 --- a/src/test/regress/output/multi_load_large_records.source +++ /dev/null @@ -1,26 +0,0 @@ --- --- MULTI_STAGE_LARGE_RECORDS --- --- Tests for loading data with large records (i.e. greater than the read buffer --- size, which is 32kB) in a distributed cluster. These tests make sure that we --- are creating shards of correct size even when records are large. -SET citus.next_shard_id TO 300000; -SET citus.shard_max_size TO "256kB"; -CREATE TABLE large_records_table (data_id integer, data text); -SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); - master_create_distributed_table ---------------------------------- - -(1 row) - -\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' -SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class - WHERE pg_class.oid=logicalrelid AND relname='large_records_table' - ORDER BY shardid; - shardminvalue | shardmaxvalue ----------------+--------------- - 1 | 1 - 2 | 2 -(2 rows) - -RESET citus.shard_max_size; diff --git a/src/test/regress/output/multi_load_more_data.source b/src/test/regress/output/multi_load_more_data.source index ecc8779e4..4dadfad96 100644 --- a/src/test/regress/output/multi_load_more_data.source +++ b/src/test/regress/output/multi_load_more_data.source @@ -8,9 +8,12 @@ SET citus.next_shard_id TO 280000; \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' -\copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' -\copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -\copy part_append FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' +SELECT master_create_empty_shard('customer_append') AS shardid1 \gset +SELECT master_create_empty_shard('customer_append') AS shardid2 \gset +copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1); +copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2); +SELECT master_create_empty_shard('part_append') AS shardid \gset +copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid); -- Exchange partition files in binary format in remaining tests ALTER SYSTEM SET citus.binary_worker_copy_format TO on; SELECT pg_reload_conf(); @@ -20,15 +23,15 @@ SELECT pg_reload_conf(); (1 row) SELECT success FROM run_command_on_workers('ALTER SYSTEM SET citus.binary_worker_copy_format TO on'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) SELECT success FROM run_command_on_workers('SELECT pg_reload_conf()'); - success ---------- + success +--------------------------------------------------------------------- t t (2 rows) diff --git a/src/test/regress/spec/isolation_append_copy_vs_all.spec b/src/test/regress/spec/isolation_append_copy_vs_all.spec deleted file mode 100644 index 4ae92f17b..000000000 --- a/src/test/regress/spec/isolation_append_copy_vs_all.spec +++ /dev/null @@ -1,118 +0,0 @@ -// -// How we organize this isolation test spec, is explained at README.md file in this directory. 
-// - -// create append distributed table to test behavior of COPY in concurrent operations -setup -{ - SET citus.shard_replication_factor TO 1; - CREATE TABLE append_copy(id integer, data text, int_data int); - SELECT create_distributed_table('append_copy', 'id', 'append'); -} - -// drop distributed table -teardown -{ - DROP TABLE IF EXISTS append_copy CASCADE; -} - -// session 1 -session "s1" -step "s1-initialize" { COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } -step "s1-begin" { BEGIN; } -step "s1-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } -step "s1-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; } -step "s1-router-select" { SELECT * FROM append_copy WHERE id = 1; } -step "s1-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; } -step "s1-adaptive-select" -{ - SET citus.enable_repartition_joins TO ON; - SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -} -step "s1-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); } -step "s1-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; } -step "s1-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; } -step "s1-delete" { DELETE FROM append_copy WHERE id = 1; } -step "s1-truncate" { TRUNCATE append_copy; } -step "s1-drop" { DROP TABLE append_copy; } -step "s1-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); } -step "s1-ddl-drop-index" { DROP INDEX append_copy_index; } -step "s1-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; } -step "s1-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; } -step "s1-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; } -step "s1-table-size" { SELECT citus_total_relation_size('append_copy'); } -step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); } -step "s1-create-non-distributed-table" { CREATE TABLE append_copy(id integer, data text, int_data int); } -step "s1-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); } -step "s1-select-count" { SELECT COUNT(*) FROM append_copy; } -step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); } -step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } -step "s1-commit" { COMMIT; } - -// session 2 -session "s2" -step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; } -step "s2-router-select" { SELECT * FROM append_copy WHERE id = 1; } -step "s2-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; } -step "s2-adaptive-select" -{ - SET citus.enable_repartition_joins TO ON; - SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; -} -step "s2-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); } -step "s2-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; } -step "s2-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; } -step "s2-delete" { DELETE FROM append_copy WHERE id = 1; } -step 
"s2-truncate" { TRUNCATE append_copy; } -step "s2-drop" { DROP TABLE append_copy; } -step "s2-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); } -step "s2-ddl-drop-index" { DROP INDEX append_copy_index; } -step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); } -step "s2-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; } -step "s2-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; } -step "s2-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; } -step "s2-table-size" { SELECT citus_total_relation_size('append_copy'); } -step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); } -step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); } - -// permutations - COPY vs COPY -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" - -// permutations - COPY first -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-adaptive-select" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" -permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" -permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" - -// permutations - COPY second -permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-adaptive-select" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count" -permutation 
"s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" -permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" -permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" -permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s2-copy" "s1-commit" "s1-select-count" -permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count" diff --git a/src/test/regress/spec/isolation_master_append_table.spec b/src/test/regress/spec/isolation_master_append_table.spec index 9f58ffd1c..e6b17132e 100644 --- a/src/test/regress/spec/isolation_master_append_table.spec +++ b/src/test/regress/spec/isolation_master_append_table.spec @@ -1,12 +1,15 @@ setup { + SET citus.next_shard_id TO 4080102; + CREATE TABLE table_to_append(id int); CREATE TABLE table_to_be_appended(id int); SELECT create_distributed_table('table_to_append', 'id', 'append'); + SELECT master_create_empty_shard('table_to_append'); INSERT INTO table_to_be_appended SELECT generate_series(1,1000); - COPY table_to_append FROM PROGRAM 'echo 0 && echo 7 && echo 8 && echo 9 && echo 10000'; + COPY table_to_append FROM PROGRAM 'echo 0 && echo 7 && echo 8 && echo 9 && echo 10000' WITH (append_to_shard 4080102); } teardown diff --git a/src/test/regress/spec/isolation_range_copy_vs_all.spec b/src/test/regress/spec/isolation_range_copy_vs_all.spec index f5b31cc7d..50ee920c3 100644 --- a/src/test/regress/spec/isolation_range_copy_vs_all.spec +++ b/src/test/regress/spec/isolation_range_copy_vs_all.spec @@ -2,12 +2,17 @@ // How we organize this isolation test spec, is explained at README.md file in this directory. 
// -// create append distributed table to test behavior of COPY in concurrent operations +// create range distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; + SET citus.next_shard_id TO 3004005; CREATE TABLE range_copy(id integer, data text, int_data int); - SELECT create_distributed_table('range_copy', 'id', 'append'); + SELECT create_distributed_table('range_copy', 'id', 'range'); + SELECT master_create_empty_shard('range_copy'); + SELECT master_create_empty_shard('range_copy'); + UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005; + UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006; } // drop distributed table @@ -76,7 +81,13 @@ step "s2-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column; step "s2-table-size" { SELECT citus_total_relation_size('range_copy'); } step "s2-master-modify-multiple-shards" { DELETE FROM range_copy; } step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); } -step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); } +step "s2-distribute-table" { + SET citus.shard_replication_factor TO 1; + SET citus.next_shard_id TO 3004005; + SELECT create_distributed_table('range_copy', 'id', 'range'); + UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005; + UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006; + } // permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" diff --git a/src/test/regress/spec/isolation_select_vs_all.spec b/src/test/regress/spec/isolation_select_vs_all.spec index eade6ff20..2aebcc7bb 100644 --- a/src/test/regress/spec/isolation_select_vs_all.spec +++ b/src/test/regress/spec/isolation_select_vs_all.spec @@ -9,8 +9,10 @@ setup SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; + SET citus.next_shard_id TO 6780300; CREATE TABLE select_append(id integer, data text, int_data int); SELECT create_distributed_table('select_append', 'id', 'append'); + SELECT master_create_empty_shard('select_append'); } // drop distributed table @@ -22,7 +24,7 @@ teardown // session 1 session "s1" -step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } +step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard 6780300); } step "s1-begin" { BEGIN; } step "s1-disable-binary-protocol" { diff --git a/src/test/regress/spec/isolation_truncate_vs_all.spec b/src/test/regress/spec/isolation_truncate_vs_all.spec index 0ea190208..dd8cb7c2c 100644 --- a/src/test/regress/spec/isolation_truncate_vs_all.spec +++ b/src/test/regress/spec/isolation_truncate_vs_all.spec @@ -9,8 +9,10 @@ setup SELECT citus_internal.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; + SET citus.next_shard_id TO 5990340; CREATE TABLE truncate_append(id integer, data text); SELECT create_distributed_table('truncate_append', 'id', 'append'); + SELECT master_create_empty_shard('truncate_append'); } // drop distributed table @@ -23,7 +25,7 @@ teardown // session 1 session "s1" -step "s1-initialize" { COPY truncate_append FROM PROGRAM 
'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; } +step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard 5990340); } step "s1-begin" { BEGIN; } step "s1-truncate" { TRUNCATE truncate_append; } step "s1-drop" { DROP TABLE truncate_append; } diff --git a/src/test/regress/sql/.gitignore b/src/test/regress/sql/.gitignore index ebe88db6a..7fdb2207d 100644 --- a/src/test/regress/sql/.gitignore +++ b/src/test/regress/sql/.gitignore @@ -10,10 +10,8 @@ /multi_behavioral_analytics_create_table_superuser.sql /multi_complex_count_distinct.sql /multi_copy.sql -/multi_create_schema.sql /multi_load_data.sql /multi_load_data_superuser.sql -/multi_load_large_records.sql /multi_load_more_data.sql /multi_mx_copy_data.sql /multi_outer_join.sql diff --git a/src/test/regress/sql/citus_update_table_statistics.sql b/src/test/regress/sql/citus_update_table_statistics.sql index 95515d4a3..9293144dc 100644 --- a/src/test/regress/sql/citus_update_table_statistics.sql +++ b/src/test/regress/sql/citus_update_table_statistics.sql @@ -63,10 +63,12 @@ ORDER BY 2, 3; -- here we update shardlength, shardminvalue and shardmaxvalue CREATE TABLE test_table_statistics_append (id int); SELECT create_distributed_table('test_table_statistics_append', 'id', 'append'); -COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH CSV; -COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH CSV; +SELECT master_create_empty_shard('test_table_statistics_append') AS shardid1 \gset +SELECT master_create_empty_shard('test_table_statistics_append') AS shardid2 \gset +COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH (format 'csv', append_to_shard :shardid1); +COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH (format 'csv', append_to_shard :shardid2); --- originally shardminvalue and shardmaxvalue will be 0,3 and 4, 7 +-- shardminvalue and shardmaxvalue are NULL SELECT ds.logicalrelid::regclass::text AS tablename, ds.shardid AS shardid, diff --git a/src/test/regress/sql/drop_column_partitioned_table.sql b/src/test/regress/sql/drop_column_partitioned_table.sql index 991c6e60a..3fed6f4eb 100644 --- a/src/test/regress/sql/drop_column_partitioned_table.sql +++ b/src/test/regress/sql/drop_column_partitioned_table.sql @@ -85,7 +85,8 @@ FROM WHERE logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass, 'sensors_2001'::regclass, 'sensors_2002'::regclass, - 'sensors_2003'::regclass, 'sensors_2004'::regclass); + 'sensors_2003'::regclass, 'sensors_2004'::regclass) +ORDER BY 1,2; -- show that all the tables prune to the same shard for the same distribution key WITH @@ -190,7 +191,8 @@ FROM WHERE logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass, 'sensors_2001'::regclass, 'sensors_2002'::regclass, - 'sensors_2003'::regclass, 'sensors_2004'::regclass); + 'sensors_2003'::regclass, 'sensors_2004'::regclass) +ORDER BY 1,2; \c - - - :worker_1_port SET search_path TO drop_column_partitioned_table; @@ -201,7 +203,8 @@ FROM WHERE logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass, 'sensors_2001'::regclass, 'sensors_2002'::regclass, - 'sensors_2003'::regclass, 'sensors_2004'::regclass); + 'sensors_2003'::regclass, 'sensors_2004'::regclass) +ORDER BY 1,2; \c - - - :master_port SET client_min_messages TO WARNING; diff --git 
a/src/test/regress/sql/multi_alter_table_add_constraints.sql b/src/test/regress/sql/multi_alter_table_add_constraints.sql index 501aa53e9..3e80b09c0 100644 --- a/src/test/regress/sql/multi_alter_table_add_constraints.sql +++ b/src/test/regress/sql/multi_alter_table_add_constraints.sql @@ -67,6 +67,7 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no', 'append'); +SELECT master_create_empty_shard('products_append') AS shardid \gset -- Can only add primary key constraint on distribution column (or group -- of columns including distribution column) @@ -75,7 +76,7 @@ ALTER TABLE products_append ADD CONSTRAINT p_key_name PRIMARY KEY(name); ALTER TABLE products_append ADD CONSTRAINT p_key PRIMARY KEY(product_no); --- Error out since first and third rows have the same product_no -\COPY products_append FROM STDIN DELIMITER AS ','; +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); 1, Product_1, 10 2, Product_2, 15 1, Product_3, 8 @@ -138,6 +139,7 @@ DROP TABLE unique_test_table_ref; -- Check "UNIQUE CONSTRAINT" with append table CREATE TABLE unique_test_table_append(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table_append', 'id', 'append'); +SELECT master_create_empty_shard('unique_test_table_append') AS shardid \gset -- Can only add unique constraint on distribution column (or group -- of columns including distribution column) @@ -146,7 +148,7 @@ ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_name UNIQUE(name); ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_id UNIQUE(id); -- Error out. Table can not have two rows with the same id. -\COPY unique_test_table_append FROM STDIN DELIMITER AS ','; +COPY unique_test_table_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); 1, Product_1 2, Product_2 1, Product_3 @@ -207,13 +209,14 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no', 'append'); +SELECT master_create_empty_shard('products_append') AS shardid \gset -- Can add column and table check constraints ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- Error out,since the third row conflicting with the p_multi_check -\COPY products_append FROM STDIN DELIMITER AS ','; +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); 1, Product_1, 10, 5 2, Product_2, 15, 8 1, Product_3, 8, 10 @@ -277,6 +280,7 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no','append'); +SELECT master_create_empty_shard('products_append') AS shardid \gset -- Can only add exclusion constraint on distribution column (or group of column -- including distribution column) @@ -285,7 +289,7 @@ ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name wi ALTER TABLE products_append ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- Error out since first and third can not pass the exclusion check. 
-\COPY products_append FROM STDIN DELIMITER AS ','; +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); 1, Product_1, 10 1, Product_2, 15 1, Product_1, 8 @@ -335,11 +339,12 @@ CREATE TABLE products_append ( ); SELECT create_distributed_table('products_append', 'product_no', 'append'); +SELECT master_create_empty_shard('products_append') AS shardid \gset ALTER TABLE products_append ALTER COLUMN name SET NOT NULL; -- Error out since name and product_no columns can not handle NULL value. -\COPY products_append FROM STDIN DELIMITER AS ','; +COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid); 1, \N, 10 \N, Product_2, 15 1, Product_1, 8 diff --git a/src/test/regress/sql/multi_create_table.sql b/src/test/regress/sql/multi_create_table.sql index e25eec397..4c318e73a 100644 --- a/src/test/regress/sql/multi_create_table.sql +++ b/src/test/regress/sql/multi_create_table.sql @@ -98,6 +98,7 @@ CREATE TABLE customer_append ( c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT create_distributed_table('customer_append', 'c_custkey', 'append'); +SELECT master_create_empty_shard('customer_append'); CREATE TABLE nation ( n_nationkey integer not null, @@ -130,6 +131,7 @@ CREATE TABLE part_append ( p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_distributed_table('part_append', 'p_partkey', 'append'); +SELECT master_create_empty_shard('part_append'); CREATE TABLE supplier ( diff --git a/src/test/regress/sql/multi_null_minmax_value_pruning.sql b/src/test/regress/sql/multi_null_minmax_value_pruning.sql index eb2fb3eae..81295174d 100644 --- a/src/test/regress/sql/multi_null_minmax_value_pruning.sql +++ b/src/test/regress/sql/multi_null_minmax_value_pruning.sql @@ -4,24 +4,35 @@ -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. +CREATE SCHEMA multi_null_minmax_value_pruning; +SET search_path TO multi_null_minmax_value_pruning; - -SET client_min_messages TO DEBUG2; SET citus.explain_all_tasks TO on; --- to avoid differing explain output - executor doesn't matter, --- because were testing pruning here. 
- --- Change configuration to treat lineitem and orders tables as large SET citus.log_multi_join_order to true; SET citus.enable_repartition_joins to ON; -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; -SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; +SET citus.next_shard_id = 290000; + +CREATE TABLE lineitem (LIKE public.lineitem); +SELECT create_distributed_table('lineitem', 'l_orderkey', 'range'); +SELECT master_create_empty_shard('lineitem') as lineitem_shardid1 \gset +SELECT master_create_empty_shard('lineitem') as lineitem_shardid2 \gset + +CREATE TABLE orders (LIKE public.orders); +SELECT create_distributed_table('orders', 'o_orderkey', 'range'); +SELECT master_create_empty_shard('orders') as orders_shardid1 \gset +SELECT master_create_empty_shard('orders') as orders_shardid2 \gset + +SET client_min_messages TO DEBUG2; + +UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000' WHERE shardid = :lineitem_shardid1 OR shardid = :orders_shardid1; +UPDATE pg_dist_shard SET shardminvalue = '6001', shardmaxvalue = '20000' WHERE shardid = :lineitem_shardid2 OR shardid = :orders_shardid2; +UPDATE pg_dist_partition SET colocationid = 87091 WHERE logicalrelid = 'orders'::regclass OR logicalrelid = 'lineitem'::regclass; -- Check that partition and join pruning works when min/max values exist -- Adding l_orderkey = 1 to make the query not router executable -SELECT coordinator_plan($Q$ +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; $Q$); @@ -34,9 +45,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders -- partition or join pruning for the shard with null min value. Since it is not -- supported with single-repartition join, dual-repartition has been used. -UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; +UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = :lineitem_shardid1; -SELECT coordinator_plan($Q$ +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; $Q$); @@ -49,9 +60,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders -- don't apply partition or join pruning for this other shard either. Since it -- is not supported with single-repartition join, dual-repartition has been used. -UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; +UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = :lineitem_shardid2; -SELECT coordinator_plan($Q$ +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; $Q$); @@ -64,9 +75,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders -- should apply partition and join pruning for this shard now. Since it is not -- supported with single-repartition join, dual-repartition has been used. 
-UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; +UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = :lineitem_shardid1; -SELECT coordinator_plan($Q$ +SELECT public.coordinator_plan($Q$ EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; $Q$); @@ -75,9 +86,5 @@ EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_partkey = o_custkey; --- Set minimum and maximum values for two shards back to their original values - -UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; -UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001; - -SET client_min_messages TO NOTICE; +RESET client_min_messages; +DROP SCHEMA multi_null_minmax_value_pruning CASCADE; diff --git a/src/test/regress/sql/multi_repartition_join_pruning.sql b/src/test/regress/sql/multi_repartition_join_pruning.sql index c6592f99f..4b8119595 100644 --- a/src/test/regress/sql/multi_repartition_join_pruning.sql +++ b/src/test/regress/sql/multi_repartition_join_pruning.sql @@ -52,14 +52,14 @@ FROM orders, customer_append WHERE o_custkey = c_custkey AND - c_custkey < 0; + c_custkey < 0 AND c_custkey > 0; SELECT count(*) FROM orders, customer_append WHERE o_custkey = c_custkey AND - c_custkey < 0; + c_custkey < 0 AND c_custkey > 0; -- Dual hash-repartition join test case. Note that this query doesn't produce -- meaningful results and is only to test hash-partitioning of two large tables diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index d4abcabf1..e3888d331 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -82,9 +82,10 @@ CREATE TABLE nation_append_search_path( n_regionkey integer not null, n_comment varchar(152) ); -SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); +SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); +SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset -\copy nation_append_search_path FROM STDIN with delimiter '|'; +copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid); 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special diff --git a/src/test/regress/sql/non_colocated_join_order.sql b/src/test/regress/sql/non_colocated_join_order.sql deleted file mode 100644 index 4c77f68c2..000000000 --- a/src/test/regress/sql/non_colocated_join_order.sql +++ /dev/null @@ -1,70 +0,0 @@ --- --- NON_COLOCATED_JOIN_ORDER --- - --- Tests to check placements of shards must be equal to choose local join logic. - -CREATE TABLE test_table_1(id int, value_1 int); -SELECT master_create_distributed_table('test_table_1', 'id', 'append'); - -\copy test_table_1 FROM STDIN DELIMITER ',' -1,2 -2,3 -3,4 -\. - -\copy test_table_1 FROM STDIN DELIMITER ',' -5,2 -6,3 -7,4 -\. - -CREATE TABLE test_table_2(id int, value_1 int); -SELECT master_create_distributed_table('test_table_2', 'id', 'append'); - -\copy test_table_2 FROM STDIN DELIMITER ',' -1,2 -2,3 -3,4 -\. - -\copy test_table_2 FROM STDIN DELIMITER ',' -5,2 -6,3 -7,4 -\. 
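
Annotation (reviewer note, not part of the patch): non_colocated_join_order.sql is deleted rather than ported. Its data loads relied on plain \copy into append-distributed tables, which after this patch no longer creates a shard implicitly, so a port would have needed the explicit-shard pattern used elsewhere in this series. An illustrative rewrite of the first data load above, under that assumption (this snippet is not in the patch):

SELECT master_create_empty_shard('test_table_1') AS shardid \gset
COPY test_table_1 FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
1,2
2,3
3,4
\.
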
- -SET citus.log_multi_join_order to TRUE; -SET client_min_messages to DEBUG1; -SET citus.enable_repartition_joins TO on; - --- when joining append tables we always get dual re-partition joins -SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; - --- Add two shards placement of interval [8,10] to test_table_1 -SET citus.shard_replication_factor to 2; - -\copy test_table_1 FROM STDIN DELIMITER ',' -8,2 -9,3 -10,4 -\. - --- Add two shards placement of interval [8,10] to test_table_2 -SET citus.shard_replication_factor to 1; - -\copy test_table_2 FROM STDIN DELIMITER ',' -8,2 -9,3 -10,4 -\. - --- Although shard interval of relation are same, since they have different amount of placements --- for interval [8,10] repartition join logic will be triggered. -SET citus.enable_repartition_joins to ON; -SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id; - -SET client_min_messages TO default; - -DROP TABLE test_table_1; -DROP TABLE test_table_2; diff --git a/src/test/regress/sql/single_node.sql b/src/test/regress/sql/single_node.sql index fed898084..029c5938b 100644 --- a/src/test/regress/sql/single_node.sql +++ b/src/test/regress/sql/single_node.sql @@ -365,6 +365,13 @@ BEGIN; INSERT INTO test SELECT i,i FROM generate_series(0,100)i; ROLLBACK; +-- master_create_empty_shard on coordinator +BEGIN; +CREATE TABLE append_table (a INT, b INT); +SELECT create_distributed_table('append_table','a','append'); +SELECT master_create_empty_shard('append_table'); +END; + -- alter table inside a tx block BEGIN; ALTER TABLE test ADD COLUMN z single_node.new_type; diff --git a/src/test/regress/sql/subquery_append.sql b/src/test/regress/sql/subquery_append.sql index 940556f7e..4210f61ef 100644 --- a/src/test/regress/sql/subquery_append.sql +++ b/src/test/regress/sql/subquery_append.sql @@ -5,14 +5,15 @@ CREATE TABLE append_table (key text, value int, extra int default 0); CREATE INDEX ON append_table (key); SELECT create_distributed_table('append_table', 'key', 'append'); -SELECT 1 FROM master_create_empty_shard('append_table'); -SELECT 1 FROM master_create_empty_shard('append_table'); +SELECT master_create_empty_shard('append_table') AS shardid1 \gset +SELECT master_create_empty_shard('append_table') AS shardid2 \gset +SELECT master_create_empty_shard('append_table') AS shardid3 \gset CREATE TABLE ref_table (value int); CREATE INDEX ON ref_table (value); SELECT create_reference_table('ref_table'); -\COPY append_table (key,value) FROM STDIN WITH CSV +COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid1); abc,234 bcd,123 bcd,234 @@ -21,7 +22,7 @@ def,456 efg,234 \. -\COPY append_table (key,value) FROM STDIN WITH CSV +COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid2); abc,123 efg,123 hij,123 @@ -30,7 +31,7 @@ ijk,1 jkl,0 \. 
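
Annotation (reviewer note, not part of the patch): the rewrite recurring through these sql-file hunks is one three-step pattern: create the append-distributed table, allocate a shard explicitly with master_create_empty_shard, then target that shard from COPY via the new append_to_shard option, instead of relying on every COPY minting a fresh shard. A minimal consolidated sketch, with illustrative table name and data:

CREATE TABLE events (id int, payload text);
SELECT create_distributed_table('events', 'id', 'append');
SELECT master_create_empty_shard('events') AS shardid \gset
COPY events FROM PROGRAM 'echo 1,a && echo 2,b' WITH (format 'csv', append_to_shard :shardid);

Note that COPY ... FROM STDIN issued through psql still reads data from the client, which is why the inline data blocks that used to follow \COPY keep working unchanged after the switch to server-side COPY.
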
-\COPY ref_table FROM STDIN WITH CSV +COPY ref_table FROM STDIN WITH CSV; 123 234 345 diff --git a/src/test/regress/sql/upgrade_basic_after.sql b/src/test/regress/sql/upgrade_basic_after.sql index b525b8964..48209fa62 100644 --- a/src/test/regress/sql/upgrade_basic_after.sql +++ b/src/test/regress/sql/upgrade_basic_after.sql @@ -99,22 +99,24 @@ INSERT INTO t3 VALUES (3, 33); SELECT * FROM t3 ORDER BY a; SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard - WHERE logicalrelid = 't_append'::regclass + WHERE logicalrelid = 't_range'::regclass ORDER BY shardminvalue, shardmaxvalue; -SELECT * FROM t_append ORDER BY id; +SELECT * FROM t_range ORDER BY id; -\copy t_append FROM STDIN DELIMITER ',' +SELECT master_create_empty_shard('t_range') AS new_shard_id \gset +UPDATE pg_dist_shard SET shardminvalue = '9', shardmaxvalue = '11' WHERE shardid = :new_shard_id; +\copy t_range FROM STDIN with (DELIMITER ',') 9,2 10,3 11,4 \. SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard - WHERE logicalrelid = 't_append'::regclass + WHERE logicalrelid = 't_range'::regclass ORDER BY shardminvalue, shardmaxvalue; -SELECT * FROM t_append ORDER BY id; +SELECT * FROM t_range ORDER BY id; ROLLBACK; diff --git a/src/test/regress/sql/upgrade_basic_before.sql b/src/test/regress/sql/upgrade_basic_before.sql index 51c97f620..3b236cca0 100644 --- a/src/test/regress/sql/upgrade_basic_before.sql +++ b/src/test/regress/sql/upgrade_basic_before.sql @@ -50,16 +50,20 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; -CREATE TABLE t_append(id int, value_1 int); -SELECT master_create_distributed_table('t_append', 'id', 'append'); +CREATE TABLE t_range(id int, value_1 int); +SELECT create_distributed_table('t_range', 'id', 'range'); +SELECT master_create_empty_shard('t_range') as shardid1 \gset +SELECT master_create_empty_shard('t_range') as shardid2 \gset +UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid1; +UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2; -\copy t_append FROM STDIN DELIMITER ',' +\copy t_range FROM STDIN with (DELIMITER ',') 1,2 2,3 3,4 \. -\copy t_append FROM STDIN DELIMITER ',' +\copy t_range FROM STDIN with (DELIMITER ',') 5,2 6,3 7,4
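
Annotation (reviewer note, not part of the patch): the upgrade_basic hunks show the range-table counterpart of the same change. master_create_empty_shard leaves the shard bounds unset (the citus_update_table_statistics hunk notes that shardminvalue and shardmaxvalue are NULL), so range-table tests that depend on pruning pin the bounds by hand in pg_dist_shard. The pattern, using the names from the hunk above with a single shard for brevity:

CREATE TABLE t_range(id int, value_1 int);
SELECT create_distributed_table('t_range', 'id', 'range');
SELECT master_create_empty_shard('t_range') AS shardid \gset
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid;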