mirror of https://github.com/citusdata/citus.git
Remove copy into new append shard logic
parent 27ba19f7e1
commit fba93df4b0
@@ -117,6 +117,9 @@
/* constant used in binary protocol */
static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";

/* custom Citus option for appending to a shard */
#define APPEND_TO_SHARD_OPTION "append_to_shard"

/*
 * Data size threshold to switch over the active placement for a connection.
 * If this is too low, overhead of starting COPY commands will hurt the
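
For context between these hunks: the APPEND_TO_SHARD_OPTION defined above is consumed by ProcessAppendToShardOption further down, and surfaces in COPY exactly as the reworked regression tests show. A minimal usage sketch (the events table and its input are hypothetical; master_create_empty_shard and the \gset capture are taken from the updated tests below):

SELECT master_create_empty_shard('events') AS shardid \gset
COPY events FROM PROGRAM 'echo 1 && echo 2' WITH (format 'csv', append_to_shard :shardid);
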
@@ -239,11 +242,6 @@ typedef enum LocalCopyStatus
/* Local functions forward declarations */
static void CopyToExistingShards(CopyStmt *copyStatement,
                                 QueryCompletionCompat *completionTag);
static void CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag,
                            Oid relationId);
static void OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
                                            ShardConnections *shardConnections,
                                            bool useBinaryCopyFormat);
static List * RemoveOptionFromList(List *optionList, char *optionName);
static bool BinaryOutputFunctionDefined(Oid typeId);
static bool BinaryInputFunctionDefined(Oid typeId);
@@ -257,9 +255,6 @@ static void SendCopyDataToPlacement(StringInfo dataBuffer, int64 shardId,
                                    MultiConnection *connection);
static void ReportCopyError(MultiConnection *connection, PGresult *result);
static uint32 AvailableColumnCount(TupleDesc tupleDescriptor);
static int64 StartCopyToNewShard(ShardConnections *shardConnections,
                                 CopyStmt *copyStatement, bool useBinaryCopyFormat);
static int64 CreateEmptyShard(char *relationName);

static Oid TypeForColumnName(Oid relationId, TupleDesc tupleDescriptor, char *columnName);
static Oid * TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor);
@@ -332,6 +327,7 @@ static void RemovePlacementStateFromCopyConnectionStateBuffer(CopyConnectionState *
                                                              connectionState,
                                                              CopyPlacementState *
                                                              placementState);
static uint64 ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement);
static uint64 ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues,
                              bool *columnNulls);

@@ -403,14 +399,11 @@ CitusCopyFrom(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)

    if (IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) ||
        IsCitusTableTypeCacheEntry(cacheEntry, RANGE_DISTRIBUTED) ||
        IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED) ||
        IsCitusTableTypeCacheEntry(cacheEntry, CITUS_TABLE_WITH_NO_DIST_KEY))
    {
        CopyToExistingShards(copyStatement, completionTag);
    }
    else if (IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED))
    {
        CopyToNewShards(copyStatement, completionTag, relationId);
    }
    else
    {
        ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -508,6 +501,14 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
    CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList,
                                                                  partitionColumnIndex,
                                                                  executorState, NULL);

    /* if the user specified an explicit append_to_shard option, write to it */
    uint64 appendShardId = ProcessAppendToShardOption(tableId, copyStatement);
    if (appendShardId != INVALID_SHARD_ID)
    {
        copyDest->appendShardId = appendShardId;
    }

    DestReceiver *dest = (DestReceiver *) copyDest;
    dest->rStartup(dest, 0, tupleDescriptor);

@@ -609,196 +610,6 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
}


/*
 * CopyToNewShards implements the COPY table_name FROM ... for append-partitioned
 * tables where we create new shards into which to copy rows.
 */
static void
CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, Oid
                relationId)
{
    /* allocate column values and nulls arrays */
    Relation distributedRelation = table_open(relationId, RowExclusiveLock);
    TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
    uint32 columnCount = tupleDescriptor->natts;
    Datum *columnValues = palloc0(columnCount * sizeof(Datum));
    bool *columnNulls = palloc0(columnCount * sizeof(bool));

    EState *executorState = CreateExecutorState();
    MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState);
    ExprContext *executorExpressionContext = GetPerTupleExprContext(executorState);

    const char *delimiterCharacter = "\t";
    const char *nullPrintCharacter = "\\N";

    ErrorContextCallback errorCallback;

    int64 currentShardId = INVALID_SHARD_ID;
    uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L;
    uint64 copiedDataSizeInBytes = 0;
    uint64 processedRowCount = 0;

    ShardConnections *shardConnections =
        (ShardConnections *) palloc0(sizeof(ShardConnections));

    /* initialize copy state to read from COPY data source */
    CopyFromState copyState = BeginCopyFrom_compat(NULL,
                                                   distributedRelation,
                                                   NULL,
                                                   copyStatement->filename,
                                                   copyStatement->is_program,
                                                   NULL,
                                                   copyStatement->attlist,
                                                   copyStatement->options);

    CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
    copyOutState->delim = (char *) delimiterCharacter;
    copyOutState->null_print = (char *) nullPrintCharacter;
    copyOutState->null_print_client = (char *) nullPrintCharacter;
    copyOutState->binary = CanUseBinaryCopyFormat(tupleDescriptor);
    copyOutState->fe_msgbuf = makeStringInfo();
    copyOutState->rowcontext = executorTupleContext;

    FmgrInfo *columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor,
                                                            copyOutState->binary);

    /* set up callback to identify error line number */
    errorCallback.callback = CopyFromErrorCallback;
    errorCallback.arg = (void *) copyState;
    errorCallback.previous = error_context_stack;

    /*
     * From here on we use copyStatement as the template for the command
     * that we send to workers. This command does not have an attribute
     * list since NextCopyFrom will generate a value for all columns.
     * We also strip options.
     */
    copyStatement->attlist = NIL;
    copyStatement->options = NIL;

    if (copyOutState->binary)
    {
        DefElem *binaryFormatOption =
            makeDefElem("format", (Node *) makeString("binary"), -1);

        copyStatement->options = lappend(copyStatement->options, binaryFormatOption);
    }

    while (true)
    {
        ResetPerTupleExprContext(executorState);

        /* switch to tuple memory context and start showing line number in errors */
        error_context_stack = &errorCallback;
        MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext);

        /* parse a row from the input */
        bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
                                               columnValues, columnNulls);

        if (!nextRowFound)
        {
            /* switch to regular memory context and stop showing line number in errors */
            MemoryContextSwitchTo(oldContext);
            error_context_stack = errorCallback.previous;
            break;
        }

        CHECK_FOR_INTERRUPTS();

        /* switch to regular memory context and stop showing line number in errors */
        MemoryContextSwitchTo(oldContext);
        error_context_stack = errorCallback.previous;

        /*
         * If copied data size is zero, this means either this is the first
         * line in the copy or we just filled the previous shard up to its
         * capacity. Either way, we need to create a new shard and
         * start copying new rows into it.
         */
        if (copiedDataSizeInBytes == 0)
        {
            /* create shard and open connections to shard placements */
            currentShardId = StartCopyToNewShard(shardConnections, copyStatement,
                                                 copyOutState->binary);

            /* send copy binary headers to shard placements */
            if (copyOutState->binary)
            {
                SendCopyBinaryHeaders(copyOutState, currentShardId,
                                      shardConnections->connectionList);
            }
        }

        /* replicate row to shard placements */
        resetStringInfo(copyOutState->fe_msgbuf);
        AppendCopyRowData(columnValues, columnNulls, tupleDescriptor,
                          copyOutState, columnOutputFunctions, NULL);
        SendCopyDataToAll(copyOutState->fe_msgbuf, currentShardId,
                          shardConnections->connectionList);

        uint64 messageBufferSize = copyOutState->fe_msgbuf->len;
        copiedDataSizeInBytes = copiedDataSizeInBytes + messageBufferSize;

        /*
         * If we filled up this shard to its capacity, send copy binary footers
         * to shard placements, and update shard statistics.
         */
        if (copiedDataSizeInBytes > shardMaxSizeInBytes)
        {
            Assert(currentShardId != INVALID_SHARD_ID);

            if (copyOutState->binary)
            {
                SendCopyBinaryFooters(copyOutState, currentShardId,
                                      shardConnections->connectionList);
            }

            EndRemoteCopy(currentShardId, shardConnections->connectionList);
            UpdateShardStatistics(shardConnections->shardId);

            copiedDataSizeInBytes = 0;
            currentShardId = INVALID_SHARD_ID;
        }

        processedRowCount += 1;

#if PG_VERSION_NUM >= PG_VERSION_14
        pgstat_progress_update_param(PROGRESS_COPY_TUPLES_PROCESSED, processedRowCount);
#endif
    }

    /*
     * For the last shard, send copy binary footers to shard placements,
     * and update shard statistics. If no rows were sent, there is no shard
     * for which to finalize the copy command.
     */
    if (copiedDataSizeInBytes > 0)
    {
        Assert(currentShardId != INVALID_SHARD_ID);

        if (copyOutState->binary)
        {
            SendCopyBinaryFooters(copyOutState, currentShardId,
                                  shardConnections->connectionList);
        }
        EndRemoteCopy(currentShardId, shardConnections->connectionList);
        UpdateShardStatistics(shardConnections->shardId);
    }

    EndCopyFrom(copyState);
    table_close(distributedRelation, NoLock);

    /* check for cancellation one last time before returning */
    CHECK_FOR_INTERRUPTS();

    if (completionTag != NULL)
    {
        CompleteCopyQueryTagCompat(completionTag, processedRowCount);
    }
}


static void
CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount)
{
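
The loop deleted above was the old auto-rotation behavior: a fresh shard was created whenever copiedDataSizeInBytes was zero, and the active shard was finalized once it grew past shardMaxSizeInBytes (ShardMaxSize is kept in kilobytes, hence the * 1024L). A hedged sketch of how one COPY used to spill across shards, assuming ShardMaxSize is the value behind the citus.shard_max_size setting (the table and file are hypothetical):

-- pre-commit behavior: a single COPY could create several shards,
-- starting a new one each time ~shard_max_size bytes had been sent
SET citus.shard_max_size TO '1GB';
COPY events FROM '/tmp/events.csv' WITH (format 'csv');
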
@@ -839,105 +650,6 @@ RemoveOptionFromList(List *optionList, char *optionName)
}


/*
 * OpenCopyConnectionsForNewShards opens a connection for each placement of a shard and
 * starts a COPY transaction if necessary. If a connection cannot be opened,
 * then the transaction is rolled back.
 */
static void
OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
                                ShardConnections *shardConnections,
                                bool useBinaryCopyFormat)
{
    int failedPlacementCount = 0;
    ListCell *placementCell = NULL;
    List *connectionList = NULL;
    int64 shardId = shardConnections->shardId;
    bool raiseInterrupts = true;
    MemoryContext localContext =
        AllocSetContextCreateExtended(CurrentMemoryContext,
                                      "OpenCopyConnectionsForNewShards",
                                      ALLOCSET_DEFAULT_MINSIZE,
                                      ALLOCSET_DEFAULT_INITSIZE,
                                      ALLOCSET_DEFAULT_MAXSIZE);


    /* release active placement list at the end of this function */
    MemoryContext oldContext = MemoryContextSwitchTo(localContext);

    List *activePlacementList = ActiveShardPlacementList(shardId);

    MemoryContextSwitchTo(oldContext);

    foreach(placementCell, activePlacementList)
    {
        ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell);
        char *nodeUser = CurrentUserName();
        uint32 connectionFlags = FOR_DML;

        /*
         * For hash partitioned tables, connection establishment happens in
         * CopyGetPlacementConnection().
         */
        Assert(placement->partitionMethod != DISTRIBUTE_BY_HASH);

        MultiConnection *connection = GetPlacementConnection(connectionFlags, placement,
                                                             nodeUser);

        /*
         * This code-path doesn't support optional connections, so we don't expect
         * NULL connections.
         */
        Assert(connection != NULL);

        if (PQstatus(connection->pgConn) != CONNECTION_OK)
        {
            ReportConnectionError(connection, ERROR);
        }

        /*
         * Errors are supposed to cause immediate aborts (i.e. we don't
         * want to/can't invalidate placements), so mark the connection as
         * critical so that later errors cause failures.
         */
        MarkRemoteTransactionCritical(connection);
        ClaimConnectionExclusively(connection);
        RemoteTransactionBeginIfNecessary(connection);

        StringInfo copyCommand = ConstructCopyStatement(copyStatement,
                                                        shardConnections->shardId);

        if (!SendRemoteCommand(connection, copyCommand->data))
        {
            ReportConnectionError(connection, ERROR);
        }
        PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
        if (PQresultStatus(result) != PGRES_COPY_IN)
        {
            ReportResultError(connection, result, ERROR);
        }
        PQclear(result);
        connectionList = lappend(connectionList, connection);
    }

    /* if all placements failed, error out */
    if (failedPlacementCount == list_length(activePlacementList))
    {
        ereport(ERROR, (errmsg("could not connect to any active placements")));
    }

    /*
     * If any placement failed, we should already have errored out above, so
     * code execution should never reach this point; this holds for all tables.
     */
    Assert(failedPlacementCount == 0);

    shardConnections->connectionList = connectionList;

    MemoryContextReset(localContext);
}


/*
 * CanUseBinaryCopyFormat iterates over columns of the relation and looks for a
 * column whose type is array of user-defined type or composite type. If it finds
@@ -1830,48 +1542,6 @@ AppendCopyBinaryFooters(CopyOutState footerOutputState)
}


/*
 * StartCopyToNewShard creates a new shard and related shard placements and
 * opens connections to shard placements.
 */
static int64
StartCopyToNewShard(ShardConnections *shardConnections, CopyStmt *copyStatement,
                    bool useBinaryCopyFormat)
{
    char *relationName = copyStatement->relation->relname;
    char *schemaName = copyStatement->relation->schemaname;
    char *qualifiedName = quote_qualified_identifier(schemaName, relationName);
    int64 shardId = CreateEmptyShard(qualifiedName);

    shardConnections->shardId = shardId;

    shardConnections->connectionList = NIL;

    /* connect to shard placements and start transactions */
    OpenCopyConnectionsForNewShards(copyStatement, shardConnections,
                                    useBinaryCopyFormat);

    return shardId;
}


/*
 * CreateEmptyShard creates a new shard and related shard placements from the
 * local master node.
 */
static int64
CreateEmptyShard(char *relationName)
{
    text *relationNameText = cstring_to_text(relationName);
    Datum relationNameDatum = PointerGetDatum(relationNameText);
    Datum shardIdDatum = DirectFunctionCall1(master_create_empty_shard,
                                             relationNameDatum);
    int64 shardId = DatumGetInt64(shardIdDatum);

    return shardId;
}


/* *INDENT-OFF* */

@@ -2283,14 +1953,17 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
    }

    /* error if any shard missing min/max values */
    if (IsCitusTableTypeCacheEntry(cacheEntry, DISTRIBUTED_TABLE) &&
        cacheEntry->hasUninitializedShardInterval)
    if (cacheEntry->hasUninitializedShardInterval)
    {
        ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                        errmsg("could not start copy"),
                        errdetail("Distributed relation \"%s\" has shards "
                                  "with missing shardminvalue/shardmaxvalue.",
                                  relationName)));
        if (IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) ||
            IsCitusTableTypeCacheEntry(cacheEntry, RANGE_DISTRIBUTED))
        {
            ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                            errmsg("could not start copy"),
                            errdetail("Distributed relation \"%s\" has shards "
                                      "with missing shardminvalue/shardmaxvalue.",
                                      relationName)));
        }
    }

    /* prevent concurrent placement changes and non-commutative DML statements */
@@ -2670,6 +2343,58 @@ RemovePlacementStateFromCopyConnectionStateBuffer(CopyConnectionState *connectionState,
}


/*
 * ProcessAppendToShardOption returns the value of append_to_shard if set,
 * and removes the option from the options list.
 */
static uint64
ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement)
{
    uint64 appendShardId = INVALID_SHARD_ID;
    bool appendToShardSet = false;

    DefElem *defel = NULL;
    foreach_ptr(defel, copyStatement->options)
    {
        if (strncmp(defel->defname, APPEND_TO_SHARD_OPTION, NAMEDATALEN) == 0)
        {
            appendShardId = defGetInt64(defel);
            appendToShardSet = true;
            break;
        }
    }

    if (appendToShardSet)
    {
        if (!IsCitusTableType(relationId, APPEND_DISTRIBUTED))
        {
            ereport(ERROR, (errmsg(APPEND_TO_SHARD_OPTION " is only valid for "
                                   "append-distributed tables")));
        }

        /* throws an error if shard does not exist */
        ShardInterval *shardInterval = LoadShardInterval(appendShardId);

        /* also check whether shard belongs to table */
        if (shardInterval->relationId != relationId)
        {
            ereport(ERROR, (errmsg("shard " UINT64_FORMAT " does not belong to table %s",
                                   appendShardId, get_rel_name(relationId))));
        }

        copyStatement->options =
            RemoveOptionFromList(copyStatement->options, APPEND_TO_SHARD_OPTION);
    }
    else if (IsCitusTableType(relationId, APPEND_DISTRIBUTED))
    {
        ereport(ERROR, (errmsg("COPY into append-distributed table requires using the "
                               APPEND_TO_SHARD_OPTION " option")));
    }

    return appendShardId;
}


/*
 * ContainsLocalPlacement returns true if the current node has
 * a local placement for the given shard id.
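
Both branches of ProcessAppendToShardOption are user-visible. A sketch of the two error paths, using the ereport messages above (the table names and shard id are hypothetical):

COPY append_events FROM STDIN;
-- ERROR: COPY into append-distributed table requires using the append_to_shard option
COPY hash_events FROM STDIN WITH (append_to_shard 102008);
-- ERROR: append_to_shard is only valid for append-distributed tables
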
@@ -2703,6 +2428,13 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *columnNulls)
    int partitionColumnIndex = copyDest->partitionColumnIndex;
    Datum partitionColumnValue = 0;
    CopyCoercionData *columnCoercionPaths = copyDest->columnCoercionPaths;
    CitusTableCacheEntry *cacheEntry =
        GetCitusTableCacheEntry(copyDest->distributedRelationId);

    if (IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED))
    {
        return copyDest->appendShardId;
    }

    /*
     * Find the partition column value and corresponding shard interval
@@ -2743,8 +2475,6 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *columnNulls)
     * For reference tables, this function blindly returns the table's single
     * shard.
     */
    CitusTableCacheEntry *cacheEntry =
        GetCitusTableCacheEntry(copyDest->distributedRelationId);
    ShardInterval *shardInterval = FindShardInterval(partitionColumnValue, cacheEntry);
    if (shardInterval == NULL)
    {
@@ -3230,7 +2960,8 @@ CitusCopyTo(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
        if (shardIntervalCell == list_head(shardIntervalList))
        {
            /* remove header after the first shard */
            RemoveOptionFromList(copyStatement->options, "header");
            copyStatement->options =
                RemoveOptionFromList(copyStatement->options, "header");
        }
    }

@@ -140,6 +140,12 @@ typedef struct CitusCopyDestReceiver
     * files as if they are shards.
     */
    char *colocatedIntermediateResultIdPrefix;

    /*
     * When copying into append-partitioned tables, the destination shard is chosen
     * upfront.
     */
    uint64 appendShardId;
} CitusCopyDestReceiver;

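Since appendShardId pins the destination up front, every row of one COPY lands in the shard named by append_to_shard, and shard min/max values are no longer derived from the copied data (the expected test outputs below change to NULL accordingly). A small inspection sketch, assuming an append-distributed table named events with :shardid captured earlier:

COPY events FROM PROGRAM 'echo 1 && echo 2' WITH (format 'csv', append_to_shard :shardid);
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'events'::regclass;
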
@@ -25,3 +25,6 @@

# python
*.pyc

# core dumps
core
@@ -43,7 +43,7 @@ output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $
# intermediate, for muscle memory backward compatibility.
check: check-full
# check-full triggers all tests that ought to be run routinely
check-full: check-multi check-multi-mx check-worker check-operations check-follower-cluster check-failure
check-full: check-multi check-multi-mx check-multi-1 check-worker check-operations check-follower-cluster check-isolation check-failure


ISOLATION_DEPDIR=.deps/isolation
@@ -10,10 +10,8 @@
/multi_behavioral_analytics_create_table_superuser.out
/multi_complex_count_distinct.out
/multi_copy.out
/multi_create_schema.out
/multi_load_data.out
/multi_load_data_superuser.out
/multi_load_large_records.out
/multi_load_more_data.out
/multi_mx_copy_data.out
/multi_outer_join.out
@@ -401,7 +401,7 @@ SELECT citus_add_local_table_to_metadata('"LocalTabLE.1!?!9012345678901234567890
-- create some objects after citus_add_local_table_to_metadata
CREATE INDEX "my!Index2" ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id) WITH ( fillfactor = 90 ) WHERE id < 20;
NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456"
NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' )WHERE (id < 20)
NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' ) WHERE (id < 20)
CREATE UNIQUE INDEX uniqueIndex2 ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id);
NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456"
NOTICE: executing the command locally: CREATE UNIQUE INDEX uniqueindex2_1504022 ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id )
@@ -22,10 +22,10 @@ SELECT create_distributed_table('test_table_statistics_hash', 'id');
INSERT INTO test_table_statistics_hash SELECT i FROM generate_series(0, 10000)i;
-- originally shardlength (size of the shard) is zero
SELECT
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
dsp.placementid AS placementid,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
ds.shardminvalue AS shardminvalue,
ds.shardmaxvalue AS shardmaxvalue
FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid)
@@ -82,10 +82,10 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
RESET citus.log_remote_commands;
RESET citus.multi_shard_modify_mode;
SELECT
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
dsp.placementid AS placementid,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
ds.shardminvalue as shardminvalue,
ds.shardmaxvalue as shardmaxvalue
FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid)
@@ -120,14 +120,16 @@ SELECT create_distributed_table('test_table_statistics_append', 'id', 'append');

(1 row)

COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH CSV;
COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH CSV;
-- originally shardminvalue and shardmaxvalue will be 0,3 and 4, 7
SELECT master_create_empty_shard('test_table_statistics_append') AS shardid1 \gset
SELECT master_create_empty_shard('test_table_statistics_append') AS shardid2 \gset
COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH (format 'csv', append_to_shard :shardid1);
COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH (format 'csv', append_to_shard :shardid2);
-- shardminvalue and shardmaxvalue are NULL
SELECT
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
dsp.placementid AS placementid,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
ds.shardminvalue as shardminvalue,
ds.shardmaxvalue as shardmaxvalue
FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid)
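
\gset above is plain psql: it stores each column of the preceding result into a variable, which makes the new shard's id available as :shardid1 and :shardid2 for the COPY commands that follow. For example:

SELECT master_create_empty_shard('test_table_statistics_append') AS shardid1 \gset
\echo :shardid1
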
@@ -135,10 +137,10 @@ WHERE ds.logicalrelid::regclass::text in ('test_table_statistics_append')
ORDER BY 2, 3;
tablename | shardid | placementid | shardname | shardminvalue | shardmaxvalue
---------------------------------------------------------------------
test_table_statistics_append | 981008 | 982016 | test_table_statistics_append_981008 | 0 | 3
test_table_statistics_append | 981008 | 982017 | test_table_statistics_append_981008 | 0 | 3
test_table_statistics_append | 981009 | 982018 | test_table_statistics_append_981009 | 4 | 7
test_table_statistics_append | 981009 | 982019 | test_table_statistics_append_981009 | 4 | 7
test_table_statistics_append | 981008 | 982016 | test_table_statistics_append_981008 | |
test_table_statistics_append | 981008 | 982017 | test_table_statistics_append_981008 | |
test_table_statistics_append | 981009 | 982018 | test_table_statistics_append_981009 | |
test_table_statistics_append | 981009 | 982019 | test_table_statistics_append_981009 | |
(4 rows)

-- delete some data to change the shardminvalues of the shards
@@ -168,10 +170,10 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
RESET citus.log_remote_commands;
RESET citus.multi_shard_modify_mode;
SELECT
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
dsp.placementid AS placementid,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
shard_name(ds.logicalrelid, ds.shardid) AS shardname,
ds.shardminvalue as shardminvalue,
ds.shardmaxvalue as shardmaxvalue
FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid)
@@ -124,7 +124,8 @@ FROM
WHERE
logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
'sensors_2001'::regclass, 'sensors_2002'::regclass,
'sensors_2003'::regclass, 'sensors_2004'::regclass);
'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;
logicalrelid | column_to_column_name
---------------------------------------------------------------------
sensors | measureid
@@ -357,7 +358,8 @@ FROM
WHERE
logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
'sensors_2001'::regclass, 'sensors_2002'::regclass,
'sensors_2003'::regclass, 'sensors_2004'::regclass);
'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;
logicalrelid | column_to_column_name
---------------------------------------------------------------------
sensors | measureid
@@ -377,7 +379,8 @@ FROM
WHERE
logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
'sensors_2001'::regclass, 'sensors_2002'::regclass,
'sensors_2003'::regclass, 'sensors_2004'::regclass);
'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;
logicalrelid | column_to_column_name
---------------------------------------------------------------------
sensors | measureid
@ -1,827 +0,0 @@
|
|||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
15
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-router-select: SELECT * FROM append_copy WHERE id = 1;
|
||||
id|data|int_data
|
||||
---------------------------------------------------------------------
|
||||
1| b | 1
|
||||
(1 row)
|
||||
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2;
|
||||
id|data|int_data
|
||||
---------------------------------------------------------------------
|
||||
0| a | 0
|
||||
1| b | 1
|
||||
2| c | 2
|
||||
3| d | 3
|
||||
4| e | 4
|
||||
(5 rows)
|
||||
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-adaptive-select:
|
||||
SET citus.enable_repartition_joins TO ON;
|
||||
SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
|
||||
|
||||
id|data|int_data|id|data|int_data
|
||||
---------------------------------------------------------------------
|
||||
0| a | 0| 0| a | 0
|
||||
1| b | 1| 1| b | 1
|
||||
2| c | 2| 2| c | 2
|
||||
3| d | 3| 3| d | 3
|
||||
4| e | 4| 4| e | 4
|
||||
(5 rows)
|
||||
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-insert: INSERT INTO append_copy VALUES(0, 'k', 0);
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
11
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-insert-select: INSERT INTO append_copy SELECT * FROM append_copy;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
15
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-update: UPDATE append_copy SET data = 'l' WHERE id = 0;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-delete: DELETE FROM append_copy WHERE id = 1;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-truncate: TRUNCATE append_copy; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-truncate: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-drop: DROP TABLE append_copy; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-drop: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
ERROR: relation "append_copy" does not exist
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-create-index: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,2)
|
||||
(localhost,57638,t,2)
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id);
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-ddl-drop-index: DROP INDEX append_copy_index; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-drop-index: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,0)
|
||||
(localhost,57638,t,0)
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-create-index-concurrently: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,1)
|
||||
(localhost,57638,t,1)
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-add-column: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,new_column)
|
||||
(localhost,57638,t,new_column)
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy-additional-column: COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV;
|
||||
step s2-ddl-drop-column: ALTER TABLE append_copy DROP new_column; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-drop-column: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,"")
|
||||
(localhost,57638,t,"")
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-ddl-rename-column: <... completed>
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
|
||||
run_command_on_workers
|
||||
---------------------------------------------------------------------
|
||||
(localhost,57637,t,new_column)
|
||||
(localhost,57638,t,new_column)
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-table-size: SELECT citus_total_relation_size('append_copy');
|
||||
citus_total_relation_size
|
||||
---------------------------------------------------------------------
|
||||
32768
|
||||
(1 row)
|
||||
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-master-drop-all-shards: <... completed>
|
||||
citus_drop_all_shards
|
||||
---------------------------------------------------------------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-drop: DROP TABLE append_copy;
|
||||
step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int);
|
||||
step s1-begin: BEGIN;
|
||||
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s2-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append'); <waiting ...>
|
||||
step s1-commit: COMMIT;
|
||||
step s2-distribute-table: <... completed>
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-router-select: SELECT * FROM append_copy WHERE id = 1;
|
||||
id|data|int_data
|
||||
---------------------------------------------------------------------
|
||||
1| b | 1
|
||||
(1 row)
|
||||
|
||||
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2;
|
||||
id|data|int_data
|
||||
---------------------------------------------------------------------
|
||||
0| a | 0
|
||||
1| b | 1
|
||||
2| c | 2
|
||||
3| d | 3
|
||||
4| e | 4
|
||||
(5 rows)
|
||||
|
||||
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count
|
||||
create_distributed_table
|
||||
---------------------------------------------------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
|
||||
step s1-begin: BEGIN;
|
||||
step s1-adaptive-select:
|
||||
SET citus.enable_repartition_joins TO ON;
|
||||
SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
|
||||
|
||||
id|data|int_data|id|data|int_data
|
||||
---------------------------------------------------------------------
|
||||
0| a | 0| 0| a | 0
|
||||
1| b | 1| 1| b | 1
|
||||
2| c | 2| 2| c | 2
|
||||
3| d | 3| 3| d | 3
|
||||
4| e | 4| 4| e | 4
|
||||
(5 rows)
|
||||
|
||||
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
|
||||
step s1-commit: COMMIT;
|
||||
step s1-select-count: SELECT COUNT(*) FROM append_copy;
|
||||
count
|
||||
---------------------------------------------------------------------
|
||||
10
|
||||
(1 row)
|
||||
|
||||
|
||||
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO append_copy VALUES(0, 'k', 0);
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
11
(1 row)


starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO append_copy SELECT * FROM append_copy;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
15
(1 row)


starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE append_copy SET data = 'l' WHERE id = 0;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)


starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM append_copy WHERE id = 1;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
9
(1 row)


starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE append_copy;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)


starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE append_copy;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
ERROR: relation "append_copy" does not exist
step s1-select-count: SELECT COUNT(*) FROM append_copy;
ERROR: relation "append_copy" does not exist

starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id);
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)

step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)


starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id);
step s1-begin: BEGIN;
step s1-ddl-drop-index: DROP INDEX append_copy_index;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)

step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)


starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
ERROR: missing data for column "new_column"
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)

step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(2 rows)


starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s1-ddl-drop-column: ALTER TABLE append_copy DROP new_column;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)

step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")
(2 rows)


starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)

step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(2 rows)


starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('append_copy');
citus_total_relation_size
---------------------------------------------------------------------
32768
(1 row)

step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)


starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy');
citus_drop_all_shards
---------------------------------------------------------------------
1
(1 row)

step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)


starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-drop: DROP TABLE append_copy;
step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)

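The append_copy permutations above all reach COPY through the append-distribution path. As a point of reference, here is a minimal sketch of the shard-targeted COPY that the updated tests later in this diff rely on; the table name and rows are illustrative, while create_distributed_table with the 'append' method, master_create_empty_shard, and the append_to_shard COPY option all appear verbatim in the expected output below.

-- Minimal sketch, assuming an append-distributed table shaped like the
-- test tables; 'events' and its sample rows are illustrative.
CREATE TABLE events (id integer, data text, int_data int);
SELECT create_distributed_table('events', 'id', 'append');
-- Create an empty shard and capture its id into the :shardid psql variable.
SELECT master_create_empty_shard('events') AS shardid \gset
-- Append the incoming rows into that specific shard.
COPY events FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1' WITH (format 'csv', append_to_shard :shardid);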
@@ -0,0 +1,104 @@
Parsed test spec with 4 sessions

starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
step s1-begin:
BEGIN;

step s2-begin:
BEGIN;

step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1;

step s2-update:
UPDATE distributed_table SET y = 2 WHERE x = 1;
<waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting
FROM
dump_global_wait_edges()
ORDER BY
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;

waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
406| 405|f
(1 row)

transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
405|
406| 405
(2 rows)

step s1-abort:
ABORT;

step s2-update: <... completed>
step s2-abort:
ABORT;


starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
step s1-begin:
BEGIN;

step s2-begin:
BEGIN;

step s3-begin:
BEGIN;

step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1;

step s2-update:
UPDATE distributed_table SET y = 2 WHERE x = 1;
<waiting ...>
step s3-update:
UPDATE distributed_table SET y = 3 WHERE x = 1;
<waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting
FROM
dump_global_wait_edges()
ORDER BY
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;

waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
410| 409|f
411| 409|f
411| 410|t
(3 rows)

transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
409|
410|409
411|409,410
(3 rows)

step s1-abort:
ABORT;

step s2-update: <... completed>
step s2-abort:
ABORT;

step s3-update: <... completed>
step s3-abort:
ABORT;

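A hedged reading of the two permutations above: each blocked UPDATE contributes one wait edge per blocker, so with three sessions the youngest transaction (411) waits on both 409 and 410, and 410's edge is itself marked as waiting. A sketch of the inspection query, runnable from any extra session while the updates are blocked (transaction numbers vary from run to run):

-- dump_global_wait_edges() is the function exercised above; the adjacency
-- view comes from get_adjacency_list_wait_graph().
SELECT waiting_transaction_num,
       blocking_transaction_num,
       blocking_transaction_waiting
FROM dump_global_wait_edges()
ORDER BY 1, 2, 3;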
@@ -1,11 +1,6 @@
Parsed test spec with 2 sessions

starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -19,11 +14,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -42,11 +32,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -69,11 +54,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -99,11 +79,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -117,11 +92,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -135,11 +105,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -153,11 +118,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -171,11 +131,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -190,11 +145,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -205,11 +155,6 @@ step s1-select-count: SELECT COUNT(*) FROM range_copy;
ERROR: relation "range_copy" does not exist

starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -225,17 +170,12 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(localhost,57637,t,1)
+(localhost,57638,t,1)
(2 rows)


starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
step s1-begin: BEGIN;
@@ -258,11 +198,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -284,11 +219,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -310,11 +240,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -337,11 +262,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -363,11 +283,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -386,11 +301,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -404,11 +314,6 @@ count


starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -428,16 +333,17 @@ count


starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-drop: DROP TABLE range_copy;
step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
-step s2-distribute-table: SELECT create_distributed_table('range_copy', 'id', 'range'); <waiting ...>
+step s2-distribute-table:
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 3004005;
+SELECT create_distributed_table('range_copy', 'id', 'range');
+UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
+UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
+<waiting ...>
step s1-commit: COMMIT;
step s2-distribute-table: <... completed>
create_distributed_table
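The s2-distribute-table step above is the full recipe for converting a plain table into a range-distributed one; a sketch of the same sequence outside the isolation harness follows. The shard ids are illustrative: they simply follow from the citus.next_shard_id setting, as in the step body.

SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 3004005;
SELECT create_distributed_table('range_copy', 'id', 'range');
-- Range shards carry no bounds until pg_dist_shard is updated; after these
-- updates, rows with id 0-4 route to shard 3004005 and id 5-9 to 3004006.
UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;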
@@ -453,11 +359,6 @@ count


starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-router-select: SELECT * FROM range_copy WHERE id = 1;
@@ -476,11 +377,6 @@ count


starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2;
@@ -503,11 +399,6 @@ count


starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-adaptive-select:
@@ -533,11 +424,6 @@ count


starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO range_copy VALUES(0, 'k', 0);
@@ -551,11 +437,6 @@ count


starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO range_copy SELECT * FROM range_copy;
@@ -569,11 +450,6 @@ count


starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE range_copy SET data = 'l' WHERE id = 0;
@@ -587,11 +463,6 @@ count


starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM range_copy WHERE id = 1;
@@ -605,11 +476,6 @@ count


starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE range_copy;
@@ -624,11 +490,6 @@ count


starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE range_copy;
@@ -640,11 +501,6 @@ step s1-select-count: SELECT COUNT(*) FROM range_copy;
ERROR: relation "range_copy" does not exist

starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
@@ -660,17 +516,12 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(localhost,57637,t,1)
+(localhost,57638,t,1)
(2 rows)


starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
step s1-begin: BEGIN;
@@ -693,11 +544,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
@@ -720,11 +566,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@@ -747,11 +588,6 @@ run_command_on_workers


starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column;
@@ -773,17 +609,12 @@ run_command_on_workers


starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('range_copy');
citus_total_relation_size
---------------------------------------------------------------------
-32768
+24576
(1 row)

step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@@ -796,11 +627,6 @@ count


starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM range_copy;
@@ -814,35 +640,26 @@ count


starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy');
citus_drop_all_shards
---------------------------------------------------------------------
-1
+2
(1 row)

step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
ERROR: could not find any shards into which to copy
step s1-select-count: SELECT COUNT(*) FROM range_copy;
count
---------------------------------------------------------------------
-5
+0
(1 row)


starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------

(1 row)

step s1-drop: DROP TABLE range_copy;
step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
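The master-drop-all-shards permutation above captures the behavior change for range tables in this commit: with every shard gone there is no interval left to route into, so the COPY now errors (new count 0) instead of silently succeeding (old count 5). A sketch of the failing sequence, standalone rather than inside the isolation tester:

SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy');
-- With no shard whose [shardminvalue, shardmaxvalue] interval covers id 5,
-- the COPY below is expected to fail as in the output above.
COPY range_copy FROM PROGRAM 'echo 5, f, 5' WITH CSV;
-- ERROR: could not find any shards into which to copy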
File diff suppressed because it is too large
@@ -1,12 +1,12 @@
Parsed test spec with 2 sessions

starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -27,12 +27,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -53,12 +53,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -75,12 +75,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -108,12 +108,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -142,12 +142,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY truncate_append_index ON truncate_append(id); <waiting ...>
@@ -173,12 +173,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -206,12 +206,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -240,12 +240,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -273,12 +273,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -304,12 +304,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -330,12 +330,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -361,9 +361,9 @@ restore_isolation_tester_func


starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

step s1-drop: DROP TABLE truncate_append;
@@ -393,12 +393,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@@ -419,12 +419,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE truncate_append;
@@ -442,12 +442,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id);
@@ -475,12 +475,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -509,12 +509,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0;
@@ -542,12 +542,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@@ -576,12 +576,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE truncate_append RENAME data TO new_column;
@@ -609,18 +609,18 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('truncate_append');
citus_total_relation_size
---------------------------------------------------------------------
-32768
+16384
(1 row)

step s2-truncate: TRUNCATE truncate_append;
@@ -639,12 +639,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM truncate_append;
@@ -665,12 +665,12 @@ restore_isolation_tester_func


starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append');
@@ -696,9 +696,9 @@ restore_isolation_tester_func


starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
-
+5990340
(1 row)

step s1-drop: DROP TABLE truncate_append;
|
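Every permutation above follows the same conversion: setup now reserves an empty shard with master_create_empty_shard (its id is masked as xxxxx in the isolation output), and s1-initialize passes that id to COPY through the new append_to_shard option instead of letting COPY create a shard implicitly. As a minimal sketch of the workflow outside the isolation tester (the column types are assumed from the column names the tests use; the data values are illustrative):

CREATE TABLE truncate_append (id int, data text);
SELECT create_distributed_table('truncate_append', 'id', 'append');
-- reserve an empty shard up front and capture its id in a psql variable
SELECT master_create_empty_shard('truncate_append') AS shardid \gset
-- append the rows into that specific shard; COPY no longer creates shards itself
COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b' WITH (format 'csv', append_to_shard :shardid);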
@@ -76,6 +76,7 @@ SELECT create_distributed_table('products_append', 'product_no', 'append');

 (1 row)

+SELECT master_create_empty_shard('products_append') AS shardid \gset
 -- Can only add primary key constraint on distribution column (or group
 -- of columns including distribution column)
 -- Command below should error out since 'name' is not a distribution column

@@ -90,7 +91,7 @@ WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint
 DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
 HINT: Consider using hash partitioning.
 -- Error out since first and third rows have the same product_no
-\COPY products_append FROM STDIN DELIMITER AS ',';
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 ERROR: duplicate key value violates unique constraint "p_key_1450033"
 DETAIL: Key (product_no)=(1) already exists.
 DROP TABLE products_append;

@@ -163,6 +164,7 @@ SELECT create_distributed_table('unique_test_table_append', 'id', 'append');

 (1 row)

+SELECT master_create_empty_shard('unique_test_table_append') AS shardid \gset
 -- Can only add unique constraint on distribution column (or group
 -- of columns including distribution column)
 -- Command below should error out since 'name' is not a distribution column

@@ -177,7 +179,7 @@ WARNING: table "unique_test_table_append" has a UNIQUE or EXCLUDE constraint
 DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
 HINT: Consider using hash partitioning.
 -- Error out. Table can not have two rows with the same id.
-\COPY unique_test_table_append FROM STDIN DELIMITER AS ',';
+COPY unique_test_table_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 ERROR: duplicate key value violates unique constraint "unn_id_1450067"
 DETAIL: Key (id)=(X) already exists.
 DROP TABLE unique_test_table_append;

@@ -250,12 +252,13 @@ SELECT create_distributed_table('products_append', 'product_no', 'append');

 (1 row)

+SELECT master_create_empty_shard('products_append') AS shardid \gset
 -- Can add column and table check constraints
 ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0);
 ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price);
 -- Error out,since the third row conflicting with the p_multi_check
-\COPY products_append FROM STDIN DELIMITER AS ',';
-ERROR: new row for relation "products_append_1450101" violates check constraint "p_multi_check"
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
+ERROR: new row for relation "products_append_1450101" violates check constraint "p_multi_check_1450101"
 DETAIL: Failing row contains (1, Product_3, 8, 10).
 DROP TABLE products_append;
 -- Check "EXCLUSION CONSTRAINT"

@@ -323,6 +326,7 @@ SELECT create_distributed_table('products_append', 'product_no','append');

 (1 row)

+SELECT master_create_empty_shard('products_append') AS shardid \gset
 -- Can only add exclusion constraint on distribution column (or group of column
 -- including distribution column)
 -- Command below should error out since 'name' is not a distribution column

@@ -337,7 +341,7 @@ WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint
 DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
 HINT: Consider using hash partitioning.
 -- Error out since first and third can not pass the exclusion check.
-\COPY products_append FROM STDIN DELIMITER AS ',';
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450135"
 DETAIL: Key (product_no, name)=(1, Product_1) conflicts with existing key (product_no, name)=(1, Product_1).
 DROP TABLE products_append;

@@ -394,9 +398,10 @@ SELECT create_distributed_table('products_append', 'product_no', 'append');

 (1 row)

+SELECT master_create_empty_shard('products_append') AS shardid \gset
 ALTER TABLE products_append ALTER COLUMN name SET NOT NULL;
 -- Error out since name and product_no columns can not handle NULL value.
-\COPY products_append FROM STDIN DELIMITER AS ',';
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 DROP TABLE products_append;
 -- Tests for ADD CONSTRAINT is not only subcommand
 CREATE TABLE products (

@@ -112,6 +112,12 @@ SELECT create_distributed_table('customer_append', 'c_custkey', 'append');

 (1 row)

+SELECT master_create_empty_shard('customer_append');
+master_create_empty_shard
+---------------------------------------------------------------------
+360006
+(1 row)
+
 CREATE TABLE nation (
 n_nationkey integer not null,
 n_name char(25) not null,

@@ -155,6 +161,12 @@ SELECT create_distributed_table('part_append', 'p_partkey', 'append');

 (1 row)

+SELECT master_create_empty_shard('part_append');
+master_create_empty_shard
+---------------------------------------------------------------------
+360009
+(1 row)
+
 CREATE TABLE supplier
 (
 s_suppkey integer not null,
@@ -3,37 +3,48 @@
 --
 -- This test checks that we can handle null min/max values in shard statistics
 -- and that we don't partition or join prune shards that have null values.
-SET client_min_messages TO DEBUG2;
+CREATE SCHEMA multi_null_minmax_value_pruning;
+SET search_path TO multi_null_minmax_value_pruning;
 SET citus.explain_all_tasks TO on;
 -- to avoid differing explain output - executor doesn't matter,
 -- because were testing pruning here.
 -- Change configuration to treat lineitem and orders tables as large
 SET citus.log_multi_join_order to true;
 SET citus.enable_repartition_joins to ON;
-SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
-shardminvalue | shardmaxvalue
+SET citus.next_shard_id = 290000;
+CREATE TABLE lineitem (LIKE public.lineitem);
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'range');
+create_distributed_table
 ---------------------------------------------------------------------
-1 | 1000
+
 (1 row)

-SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
-shardminvalue | shardmaxvalue
+SELECT master_create_empty_shard('lineitem') as lineitem_shardid1 \gset
+SELECT master_create_empty_shard('lineitem') as lineitem_shardid2 \gset
+CREATE TABLE orders (LIKE public.orders);
+SELECT create_distributed_table('orders', 'o_orderkey', 'range');
+create_distributed_table
 ---------------------------------------------------------------------
-1 | 1000
+
 (1 row)

+SELECT master_create_empty_shard('orders') as orders_shardid1 \gset
+SELECT master_create_empty_shard('orders') as orders_shardid2 \gset
+SET client_min_messages TO DEBUG2;
+UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000' WHERE shardid = :lineitem_shardid1 OR shardid = :orders_shardid1;
+UPDATE pg_dist_shard SET shardminvalue = '6001', shardmaxvalue = '20000' WHERE shardid = :lineitem_shardid2 OR shardid = :orders_shardid2;
+UPDATE pg_dist_partition SET colocationid = 87091 WHERE logicalrelid = 'orders'::regclass OR logicalrelid = 'lineitem'::regclass;
 -- Check that partition and join pruning works when min/max values exist
 -- Adding l_orderkey = 1 to make the query not router executable
-SELECT coordinator_plan($Q$
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
 $Q$);
-DEBUG: Creating router plan
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
+DEBUG: Router planner cannot handle multi-shard select queries
+CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
+LOG: join order: [ "lineitem" ]
+CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
 coordinator_plan
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Task Count: 1
+Task Count: 2
 (2 rows)

 EXPLAIN (COSTS FALSE)

@@ -41,8 +52,8 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 WHERE l_orderkey = o_orderkey;
 DEBUG: Router planner cannot handle multi-shard select queries
 LOG: join order: [ "lineitem" ][ local partition join "orders" ]
-DEBUG: join prunable for intervals [-2147483648,-1] and [0,2147483647]
-DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1]
+DEBUG: join prunable for intervals [1,6000] and [6001,20000]
+DEBUG: join prunable for intervals [6001,20000] and [1,6000]
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate

@@ -53,38 +64,36 @@ DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1]
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
 -> Hash Join
-Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Seq Scan on lineitem_360000 lineitem
+Hash Cond: (orders.o_orderkey = lineitem.l_orderkey)
+-> Seq Scan on orders_290002 orders
 -> Hash
--> Seq Scan on orders_360002 orders
+-> Seq Scan on lineitem_290000 lineitem
 -> Task
 Node: host=localhost port=xxxxx dbname=regression
 -> Aggregate
 -> Hash Join
-Hash Cond: (lineitem.l_orderkey = orders.o_orderkey)
--> Seq Scan on lineitem_360001 lineitem
+Hash Cond: (orders.o_orderkey = lineitem.l_orderkey)
+-> Seq Scan on orders_290003 orders
 -> Hash
--> Seq Scan on orders_360003 orders
+-> Seq Scan on lineitem_290001 lineitem
 (20 rows)

 -- Now set the minimum value for a shard to null. Then check that we don't apply
 -- partition or join pruning for the shard with null min value. Since it is not
 -- supported with single-repartition join, dual-repartition has been used.
-UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
-SELECT coordinator_plan($Q$
+UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = :lineitem_shardid1;
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
 $Q$);
-DEBUG: Distributed planning for a fast-path router query
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
-DEBUG: Creating router plan
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
-DEBUG: query has a single distribution column value: 9030
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
+DEBUG: Router planner cannot handle multi-shard select queries
+CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
+LOG: join order: [ "lineitem" ]
+CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
 coordinator_plan
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Task Count: 1
+Task Count: 2
 (2 rows)

 EXPLAIN (COSTS FALSE)

@@ -137,21 +146,19 @@ DETAIL: Creating dependency on merge taskId 12
 -- Next, set the maximum value for another shard to null. Then check that we
 -- don't apply partition or join pruning for this other shard either. Since it
 -- is not supported with single-repartition join, dual-repartition has been used.
-UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
-SELECT coordinator_plan($Q$
+UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = :lineitem_shardid2;
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
 $Q$);
-DEBUG: Distributed planning for a fast-path router query
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
-DEBUG: Creating router plan
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
-DEBUG: query has a single distribution column value: 9030
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
+DEBUG: Router planner cannot handle multi-shard select queries
+CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
+LOG: join order: [ "lineitem" ]
+CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
 coordinator_plan
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)
-Task Count: 1
+Task Count: 2
 (2 rows)

 EXPLAIN (COSTS FALSE)

@@ -204,17 +211,13 @@ DETAIL: Creating dependency on merge taskId 12
 -- Last, set the minimum value to 0 and check that we don't treat it as null. We
 -- should apply partition and join pruning for this shard now. Since it is not
 -- supported with single-repartition join, dual-repartition has been used.
-UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
-SELECT coordinator_plan($Q$
+UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = :lineitem_shardid1;
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
 $Q$);
-DEBUG: Distributed planning for a fast-path router query
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
 DEBUG: Creating router plan
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
-DEBUG: query has a single distribution column value: 9030
-CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
+CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
 coordinator_plan
 ---------------------------------------------------------------------
 Custom Scan (Citus Adaptive)

@@ -268,7 +271,8 @@ DETAIL: Creating dependency on merge taskId 12
 Merge Task Count: 4
 (10 rows)

--- Set minimum and maximum values for two shards back to their original values
-UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
-UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001;
-SET client_min_messages TO NOTICE;
+RESET client_min_messages;
+DROP SCHEMA multi_null_minmax_value_pruning CASCADE;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to table lineitem
+drop cascades to table orders
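The null min/max pruning test now builds its range-distributed fixtures explicitly instead of relying on append-mode COPY to produce shards 290000 and 290001. Condensed from the converted setup above, the pattern is (the bounds are the test's own values):

CREATE TABLE lineitem (LIKE public.lineitem);
SELECT create_distributed_table('lineitem', 'l_orderkey', 'range');
-- a fresh range shard has NULL min/max; the test assigns bounds directly in the metadata
SELECT master_create_empty_shard('lineitem') AS lineitem_shardid1 \gset
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000' WHERE shardid = :lineitem_shardid1;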
@@ -199,20 +199,48 @@ FROM
 orders, customer_append
 WHERE
 o_custkey = c_custkey AND
-c_custkey < 0;
+c_custkey < 0 AND c_custkey > 0;
 DEBUG: Router planner does not support append-partitioned tables.
+DEBUG: join prunable for task partitionId 0 and 1
+DEBUG: join prunable for task partitionId 0 and 2
+DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 1 and 0
+DEBUG: join prunable for task partitionId 1 and 2
+DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 2 and 0
+DEBUG: join prunable for task partitionId 2 and 1
+DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 3 and 0
+DEBUG: join prunable for task partitionId 3 and 1
+DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 3
+DEBUG: pruning merge fetch taskId 2
+DETAIL: Creating dependency on merge taskId 4
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 6
+DEBUG: pruning merge fetch taskId 5
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 9
+DEBUG: pruning merge fetch taskId 8
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 10
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 11
+DETAIL: Creating dependency on merge taskId 16
 QUERY PLAN
 ---------------------------------------------------------------------
 Aggregate
 -> Custom Scan (Citus Adaptive)
-Task Count: 0
+Task Count: 4
 Tasks Shown: None, not supported for re-partition queries
 -> MapMergeJob
 Map Task Count: 2
 Merge Task Count: 4
 -> MapMergeJob
-Map Task Count: 0
-Merge Task Count: 0
+Map Task Count: 3
+Merge Task Count: 4
 (10 rows)

 SELECT

@@ -221,8 +249,36 @@ FROM
 orders, customer_append
 WHERE
 o_custkey = c_custkey AND
-c_custkey < 0;
+c_custkey < 0 AND c_custkey > 0;
 DEBUG: Router planner does not support append-partitioned tables.
+DEBUG: join prunable for task partitionId 0 and 1
+DEBUG: join prunable for task partitionId 0 and 2
+DEBUG: join prunable for task partitionId 0 and 3
+DEBUG: join prunable for task partitionId 1 and 0
+DEBUG: join prunable for task partitionId 1 and 2
+DEBUG: join prunable for task partitionId 1 and 3
+DEBUG: join prunable for task partitionId 2 and 0
+DEBUG: join prunable for task partitionId 2 and 1
+DEBUG: join prunable for task partitionId 2 and 3
+DEBUG: join prunable for task partitionId 3 and 0
+DEBUG: join prunable for task partitionId 3 and 1
+DEBUG: join prunable for task partitionId 3 and 2
+DEBUG: pruning merge fetch taskId 1
+DETAIL: Creating dependency on merge taskId 3
+DEBUG: pruning merge fetch taskId 2
+DETAIL: Creating dependency on merge taskId 4
+DEBUG: pruning merge fetch taskId 4
+DETAIL: Creating dependency on merge taskId 6
+DEBUG: pruning merge fetch taskId 5
+DETAIL: Creating dependency on merge taskId 8
+DEBUG: pruning merge fetch taskId 7
+DETAIL: Creating dependency on merge taskId 9
+DEBUG: pruning merge fetch taskId 8
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 10
+DETAIL: Creating dependency on merge taskId 12
+DEBUG: pruning merge fetch taskId 11
+DETAIL: Creating dependency on merge taskId 16
 count
 ---------------------------------------------------------------------
 0
@@ -115,13 +115,14 @@ CREATE TABLE nation_append_search_path(
 n_regionkey integer not null,
 n_comment varchar(152)
 );
-SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
-master_create_distributed_table
+SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
+create_distributed_table
 ---------------------------------------------------------------------

 (1 row)

-\copy nation_append_search_path FROM STDIN with delimiter '|';
+SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset
+copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid);
 -- create shard with master_create_worker_shards
 CREATE TABLE test_schema_support.nation_hash(
 n_nationkey integer not null,
@@ -1,52 +0,0 @@
---
--- NON_COLOCATED_JOIN_ORDER
---
--- Tests to check placements of shards must be equal to choose local join logic.
-CREATE TABLE test_table_1(id int, value_1 int);
-SELECT master_create_distributed_table('test_table_1', 'id', 'append');
-master_create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-\copy test_table_1 FROM STDIN DELIMITER ','
-\copy test_table_1 FROM STDIN DELIMITER ','
-CREATE TABLE test_table_2(id int, value_1 int);
-SELECT master_create_distributed_table('test_table_2', 'id', 'append');
-master_create_distributed_table
----------------------------------------------------------------------
-
-(1 row)
-
-\copy test_table_2 FROM STDIN DELIMITER ','
-\copy test_table_2 FROM STDIN DELIMITER ','
-SET citus.log_multi_join_order to TRUE;
-SET client_min_messages to DEBUG1;
-SET citus.enable_repartition_joins TO on;
--- when joining append tables we always get dual re-partition joins
-SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;
-LOG: join order: [ "test_table_1" ][ dual partition join "test_table_2" ]
-count
----------------------------------------------------------------------
-6
-(1 row)
-
--- Add two shards placement of interval [8,10] to test_table_1
-SET citus.shard_replication_factor to 2;
-\copy test_table_1 FROM STDIN DELIMITER ','
--- Add two shards placement of interval [8,10] to test_table_2
-SET citus.shard_replication_factor to 1;
-\copy test_table_2 FROM STDIN DELIMITER ','
--- Although shard interval of relation are same, since they have different amount of placements
--- for interval [8,10] repartition join logic will be triggered.
-SET citus.enable_repartition_joins to ON;
-SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;
-LOG: join order: [ "test_table_1" ][ dual partition join "test_table_2" ]
-count
----------------------------------------------------------------------
-9
-(1 row)
-
-SET client_min_messages TO default;
-DROP TABLE test_table_1;
-DROP TABLE test_table_2;
@@ -625,6 +625,19 @@ BEGIN;
 32
 INSERT INTO test SELECT i,i FROM generate_series(0,100)i;
 ROLLBACK;
+-- master_create_empty_shard on coordinator
+BEGIN;
+CREATE TABLE append_table (a INT, b INT);
+SELECT create_distributed_table('append_table','a','append');
+create_distributed_table
+---------------------------------------------------------------------
+
+(1 row)
+
+SELECT master_create_empty_shard('append_table');
+NOTICE: Creating placements for the append partitioned tables on the coordinator is not supported, skipping coordinator ...
+ERROR: could only create 0 of 1 of required shard replicas
+END;
 -- alter table inside a tx block
 BEGIN;
 ALTER TABLE test ADD COLUMN z single_node.new_type;
@@ -8,18 +8,9 @@ SELECT create_distributed_table('append_table', 'key', 'append');

 (1 row)

-SELECT 1 FROM master_create_empty_shard('append_table');
-?column?
----------------------------------------------------------------------
-1
-(1 row)
-
-SELECT 1 FROM master_create_empty_shard('append_table');
-?column?
----------------------------------------------------------------------
-1
-(1 row)
-
+SELECT master_create_empty_shard('append_table') AS shardid1 \gset
+SELECT master_create_empty_shard('append_table') AS shardid2 \gset
+SELECT master_create_empty_shard('append_table') AS shardid3 \gset
 CREATE TABLE ref_table (value int);
 CREATE INDEX ON ref_table (value);
 SELECT create_reference_table('ref_table');

@@ -28,9 +19,9 @@ SELECT create_reference_table('ref_table');

 (1 row)

-\COPY append_table (key,value) FROM STDIN WITH CSV
-\COPY append_table (key,value) FROM STDIN WITH CSV
-\COPY ref_table FROM STDIN WITH CSV
+COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid1);
+COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid2);
+COPY ref_table FROM STDIN WITH CSV;
 -- exercise some optimizer pushdown features with subqueries
 SELECT count(*) FROM (SELECT random() FROM append_table) u;
 count
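The \gset meta-command used throughout these rewritten tests is plain psql: it stores the columns of a single-row result into psql variables named after the output aliases, which can then be interpolated into later commands. Illustrated with lines taken from the hunk above:

-- shardid1 becomes a psql variable holding the new shard's id
SELECT master_create_empty_shard('append_table') AS shardid1 \gset
-- :shardid1 expands to that id inside the COPY options
COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid1);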
@@ -69,7 +69,7 @@ SELECT logicalrelid FROM pg_dist_partition
 t_ab
 r
 tr
-t_append
+t_range
 (6 rows)

 SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4::bit(8)

@@ -80,14 +80,14 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4
 relnamespace='upgrade_basic'::regnamespace
 AND tgname LIKE 'truncate_trigger_%'
 ORDER BY tgrelid::regclass;
-tgrelid | tgfoid | tgisinternal | tgenabled | tgtype
+tgrelid | tgfoid | tgisinternal | tgenabled | tgtype
 ---------------------------------------------------------------------
-t | citus_truncate_trigger | t | O | 00100000
-tp | citus_truncate_trigger | t | O | 00100000
-t_ab | citus_truncate_trigger | t | O | 00100000
-r | citus_truncate_trigger | t | O | 00100000
-tr | citus_truncate_trigger | t | O | 00100000
-t_append | citus_truncate_trigger | t | O | 00100000
+t | citus_truncate_trigger | t | O | 00100000
+tp | citus_truncate_trigger | t | O | 00100000
+t_ab | citus_truncate_trigger | t | O | 00100000
+r | citus_truncate_trigger | t | O | 00100000
+tr | citus_truncate_trigger | t | O | 00100000
+t_range | citus_truncate_trigger | t | O | 00100000
 (6 rows)

 SELECT * FROM t ORDER BY a;

@@ -305,7 +305,7 @@ SELECT * FROM t3 ORDER BY a;
 (3 rows)

 SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
-WHERE logicalrelid = 't_append'::regclass
+WHERE logicalrelid = 't_range'::regclass
 ORDER BY shardminvalue, shardmaxvalue;
 shardminvalue | shardmaxvalue
 ---------------------------------------------------------------------

@@ -313,7 +313,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
 5 | 7
 (2 rows)

-SELECT * FROM t_append ORDER BY id;
+SELECT * FROM t_range ORDER BY id;
 id | value_1
 ---------------------------------------------------------------------
 1 | 2

@@ -324,9 +324,11 @@ SELECT * FROM t_append ORDER BY id;
 7 | 4
 (6 rows)

-\copy t_append FROM STDIN DELIMITER ','
+SELECT master_create_empty_shard('t_range') AS new_shard_id \gset
+UPDATE pg_dist_shard SET shardminvalue = '9', shardmaxvalue = '11' WHERE shardid = :new_shard_id;
+\copy t_range FROM STDIN with (DELIMITER ',')
 SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
-WHERE logicalrelid = 't_append'::regclass
+WHERE logicalrelid = 't_range'::regclass
 ORDER BY shardminvalue, shardmaxvalue;
 shardminvalue | shardmaxvalue
 ---------------------------------------------------------------------

@@ -335,7 +337,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
 9 | 11
 (3 rows)

-SELECT * FROM t_append ORDER BY id;
+SELECT * FROM t_range ORDER BY id;
 id | value_1
 ---------------------------------------------------------------------
 1 | 2
@@ -69,12 +69,16 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex
 RETURNS void
 AS 'citus', $$master_create_worker_shards$$
 LANGUAGE C STRICT;
-CREATE TABLE t_append(id int, value_1 int);
-SELECT master_create_distributed_table('t_append', 'id', 'append');
-master_create_distributed_table
+CREATE TABLE t_range(id int, value_1 int);
+SELECT create_distributed_table('t_range', 'id', 'range');
+create_distributed_table
 ---------------------------------------------------------------------

 (1 row)

-\copy t_append FROM STDIN DELIMITER ','
-\copy t_append FROM STDIN DELIMITER ','
+SELECT master_create_empty_shard('t_range') as shardid1 \gset
+SELECT master_create_empty_shard('t_range') as shardid2 \gset
+UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid1;
+UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2;
+\copy t_range FROM STDIN with (DELIMITER ',')
+\copy t_range FROM STDIN with (DELIMITER ',')
@@ -24,7 +24,7 @@ drop cascades to table upgrade_basic.t_ab
 drop cascades to table upgrade_basic.t2
 drop cascades to table upgrade_basic.r
 drop cascades to table upgrade_basic.tr
-drop cascades to table upgrade_basic.t_append
+drop cascades to table upgrade_basic.t_range
 -- as we updated citus to available version,
 -- "isn" extension
 -- "new_schema" schema
@@ -2,7 +2,6 @@
 -- MULTI_AGG_TYPE_CONVERSION
 --
-

 -- Test aggregate type conversions using sums of integers and division operator
 SELECT sum(l_suppkey) FROM lineitem;
 SELECT sum(l_suppkey) / 2 FROM lineitem;

@@ -19,8 +18,9 @@ CREATE TABLE aggregate_type (
 double_value float(40) not null,
 interval_value interval not null);
 SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
+SELECT master_create_empty_shard('aggregate_type') AS shardid \gset

-\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
+copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid);

 -- Test conversions using aggregates on floats and division
@@ -29,7 +29,8 @@ CREATE TABLE lineitem_alter (
 )
 WITH ( fillfactor = 80 );
 SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
-\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+SELECT master_create_empty_shard('lineitem_alter') AS shardid \gset
+copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);

 -- verify that the storage options made it to the table definitions
 SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';

@@ -65,7 +66,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
 ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;

 -- \copy to verify that default values take effect
-\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
+copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);

 SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
 SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;

@@ -80,7 +82,10 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;

 -- \copy should fail because it will try to insert NULLs for a NOT NULL column
 -- Note, this operation will create a table on the workers but it won't be in the metadata
-\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+BEGIN;
+SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
+copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
+END;

 -- Verify that DROP NOT NULL works

@@ -88,7 +93,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;

 -- \copy should succeed now
-\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
+copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
 SELECT count(*) from lineitem_alter;

 -- Verify that SET DATA TYPE works
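Where a COPY is expected to fail, the converted test wraps the shard reservation and the COPY in one transaction, presumably so that the metadata for the empty shard rolls back together with the failed load and no dangling shard is left behind. The pattern from the hunk above:

BEGIN;
SELECT master_create_empty_shard('lineitem_alter') AS shardid \gset
-- if this COPY errors out, the shard created above is rolled back with it
copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
END;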
@@ -20,6 +20,8 @@ CREATE TABLE multi_append_table_to_shard_left
 left_text TEXT not null
 );
 SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
+SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid1 \gset
+SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid2 \gset

 CREATE TABLE multi_append_table_to_shard_right_reference_hash
 (

@@ -32,8 +34,8 @@ SELECT create_distributed_table('multi_append_table_to_shard_right_reference_has

 -- Replicate 'left' table on both workers
 SELECT set_config('citus.shard_replication_factor', '2', false);
-\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
-\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
+copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid1);
+copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid2);

 -- Place 'right' table on both workers
 \copy multi_append_table_to_shard_right_reference FROM '@abs_srcdir@/data/agg.data'
@@ -35,6 +35,9 @@ COPY customer_copy_hash (c_custkey,c_name) FROM STDIN;
 notinteger,customernot
 \.

+-- Test invalid option
+COPY customer_copy_hash (c_custkey,c_name) FROM STDIN (append_to_shard 1);
+
 -- Confirm that no data was copied
 SELECT count(*) FROM customer_copy_hash;

@@ -231,46 +234,55 @@ CREATE TABLE customer_copy_append (
 c_acctbal decimal(15,2),
 c_mktsegment char(10),
 c_comment varchar(117));
-SELECT master_create_distributed_table('customer_copy_append', 'c_custkey', 'append');
+SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append');

 -- Test syntax error
-COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv');
+BEGIN;
+SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
+COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
 1,customer1
 2,customer2
 notinteger,customernot
 \.
+END;

 -- Test that no shard is created for failing copy
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass;

 -- Test empty copy
-COPY customer_copy_append FROM STDIN;
+BEGIN;
+SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
+COPY customer_copy_append FROM STDIN WITH (append_to_shard :shardid);
 \.
+END;

--- Test that no shard is created for copying zero rows
+-- Test that a shard is created
 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass;

 -- Test proper copy
-COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv');
+BEGIN;
+SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
+COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
 1,customer1
 2,customer2
 \.
+END;

 -- Check whether data was copied properly
 SELECT * FROM customer_copy_append;

 -- Manipulate and check shard statistics for append-partitioned table shard
-UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560131;
-UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560131;
+UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560132;
+UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560132;

-SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131;
-SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131;
+SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132;
+SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132;

 -- Update shard statistics for append-partitioned shard
-SELECT master_update_shard_statistics(560131);
+SELECT master_update_shard_statistics(560132);

-SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131;
-SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131;
+SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132;
+SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132;

 -- Create lineitem table
 CREATE TABLE lineitem_copy_append (

@@ -290,33 +302,18 @@ CREATE TABLE lineitem_copy_append (
 l_shipinstruct char(25) not null,
 l_shipmode char(10) not null,
 l_comment varchar(44) not null);
-SELECT master_create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append');

--- Test multiple shard creation
-SET citus.shard_max_size TO '256kB';
-
-COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|';
+BEGIN;
+SELECT master_create_empty_shard('lineitem_copy_append') AS shardid \gset
+COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
+END;

 SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass;

--- Test round robin shard policy
-SET citus.shard_replication_factor TO 1;
-
-COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|';
-
-SELECT
-pg_dist_shard_placement.shardid,
-pg_dist_shard_placement.nodeport
-FROM
-pg_dist_shard,
-pg_dist_shard_placement
-WHERE
-pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND
-logicalrelid = 'lineitem_copy_append'::regclass
-ORDER BY
-pg_dist_shard.shardid DESC
-LIMIT
-5;
+-- trigger some errors on the append_to_shard option
+COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 1);
+COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 560000);

 -- Test schema support on append partitioned tables
 CREATE SCHEMA append;

@@ -330,11 +327,13 @@ CREATE TABLE append.customer_copy (
 c_mktsegment char(10),
 c_comment varchar(117));

-SELECT master_create_distributed_table('append.customer_copy', 'c_custkey', 'append');
+SELECT create_distributed_table('append.customer_copy', 'c_custkey', 'append');
+SELECT master_create_empty_shard('append.customer_copy') AS shardid1 \gset
+SELECT master_create_empty_shard('append.customer_copy') AS shardid2 \gset

 -- Test copy from the master node
-COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|');
-COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|');
+COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard :shardid1);
+COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid2);

 -- Test the content of the table
 SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy;

@@ -421,8 +420,10 @@ CREATE TABLE packed_numbers_append (
 packed_numbers number_pack[]
 );

-SELECT master_create_distributed_table('packed_numbers_append', 'id', 'append');
-COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite';
+SELECT create_distributed_table('packed_numbers_append', 'id', 'append');
+SELECT master_create_empty_shard('packed_numbers_append') AS shardid \gset
+
+COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite' WITH (append_to_shard :shardid);

 -- Verify data is actually copied
 SELECT * FROM packed_numbers_append;

@@ -434,8 +435,10 @@ CREATE TABLE super_packed_numbers_append (
 super_packed_number super_number_pack
 );

-SELECT master_create_distributed_table('super_packed_numbers_append', 'id', 'append');
-COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite';
+SELECT create_distributed_table('super_packed_numbers_append', 'id', 'append');
+SELECT master_create_empty_shard('super_packed_numbers_append') AS shardid \gset
+
+COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite' WITH (append_to_shard :shardid);

 -- Verify data is actually copied
 SELECT * FROM super_packed_numbers_append;

@@ -448,9 +451,10 @@ CREATE TABLE composite_partition_column_table(
 composite_column number_pack
 );

-SELECT master_create_distributed_table('composite_partition_column_table', 'composite_column', 'append');
+SELECT create_distributed_table('composite_partition_column_table', 'composite_column', 'append');
+SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset

-\COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv');
+COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
 1,"(1,1)"
 2,"(2,2)"
 \.

@@ -458,20 +462,22 @@ SELECT master_create_distributed_table('composite_partition_column_table', 'comp

 -- Test copy on append distributed tables do not create shards on removed workers
 CREATE TABLE numbers_append (a int, b int);
-SELECT master_create_distributed_table('numbers_append', 'a', 'append');
+SELECT create_distributed_table('numbers_append', 'a', 'append');

 -- no shards is created yet
 SELECT shardid, nodename, nodeport
 FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
 WHERE logicalrelid = 'numbers_append'::regclass order by placementid;

-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
 1,1
 2,2
 \.

-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
 3,5
 4,6
 \.

@@ -487,12 +493,15 @@ SELECT master_disable_node('localhost', :worker_1_port);
 SET citus.shard_replication_factor TO 1;

 -- add two new shards and verify they are created at the other node
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
 5,7
 6,8
 \.

-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
 7,9
 8,10
 \.

@@ -507,12 +516,15 @@ SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
 RESET client_min_messages;
 RESET citus.shard_replication_factor;
 -- add two new shards and verify they are created at both workers
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
 9,11
 10,12
 \.

-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
 11,13
 12,14
 \.

@@ -625,10 +637,6 @@ SELECT shardid, shardstate, nodename, nodeport
 \c - :default_user - :worker_1_port
 ALTER USER test_user WITH login;

--- there is a dangling shard in worker_2, drop it
-\c - test_user - :worker_2_port
-DROP TABLE numbers_hash_other_560176;
-
 \c - test_user - :master_port

 DROP TABLE numbers_hash;

@@ -644,7 +652,7 @@ CREATE TABLE numbers_hash(a int, b int);
 SELECT create_distributed_table('numbers_hash', 'a');

 \c - - - :worker_1_port
-ALTER TABLE numbers_hash_560180 DROP COLUMN b;
+ALTER TABLE numbers_hash_560170 DROP COLUMN b;
 \c - - - :master_port

 -- operation will fail to modify a shard and roll back
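The two deliberately failing COPY commands at the end of the lineitem_copy_append block exercise validation of append_to_shard before any data moves: shard id 1 does not exist, and 560000 is presumably a shard that belongs to a different relation, so both should be rejected. From the hunk above:

-- expected to error out: no such shard
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 1);
-- expected to error out: shard exists but does not belong to lineitem_copy_append
COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 560000);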
@@ -1,15 +0,0 @@
-
-SET citus.next_shard_id TO 250000;
-
-
-CREATE SCHEMA tpch
-CREATE TABLE nation (
-n_nationkey integer not null,
-n_name char(25) not null,
-n_regionkey integer not null,
-n_comment varchar(152));
-SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
-
-\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
-
-SELECT count(*) from tpch.nation;

@@ -18,9 +18,9 @@ SET citus.next_shard_id TO 290000;
 \copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'

 \copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
+\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)
 \copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
 \copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
-\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|'
+\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009)
 \copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
 \copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
@@ -1,23 +0,0 @@
---
--- MULTI_STAGE_LARGE_RECORDS
---
--- Tests for loading data with large records (i.e. greater than the read buffer
--- size, which is 32kB) in a distributed cluster. These tests make sure that we
--- are creating shards of correct size even when records are large.
-
-
-SET citus.next_shard_id TO 300000;
-
-
-SET citus.shard_max_size TO "256kB";
-
-CREATE TABLE large_records_table (data_id integer, data text);
-SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
-
-\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
-
-SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
-WHERE pg_class.oid=logicalrelid AND relname='large_records_table'
-ORDER BY shardid;
-
-RESET citus.shard_max_size;

@@ -14,9 +14,15 @@ SET citus.next_shard_id TO 280000;
 \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
 \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'

-\copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
-\copy part_append FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
+SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
+SELECT master_create_empty_shard('customer_append') AS shardid2 \gset
+
+copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1);
+copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2);
+
+SELECT master_create_empty_shard('part_append') AS shardid \gset
+
+copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid);

 -- Exchange partition files in binary format in remaining tests
 ALTER SYSTEM SET citus.binary_worker_copy_format TO on;
@ -44,7 +44,6 @@ test: isolation_create_distributed_table isolation_master_append_table
|
|||
test: isolation_multi_shard_modify_vs_all
|
||||
test: isolation_modify_with_subquery_vs_dml
|
||||
test: isolation_hash_copy_vs_all
|
||||
test: isolation_append_copy_vs_all
|
||||
test: isolation_range_copy_vs_all
|
||||
test: isolation_partitioned_copy_vs_all
|
||||
test: isolation_select_vs_all
|
||||
|
|
|
@ -127,12 +127,6 @@ test: with_modifying cte_prepared_modify cte_nested_modification
|
|||
test: ensure_no_intermediate_data_leak
|
||||
test: with_executors with_join with_partitioning with_transactions with_dml
|
||||
|
||||
|
||||
# ----------
|
||||
# Tests to check our large record loading and shard deletion behavior
|
||||
# ----------
|
||||
test: multi_load_large_records
|
||||
|
||||
# ----------
|
||||
# Tests around DDL statements run on distributed tables
|
||||
# ----------
|
||||
|
@ -140,12 +134,6 @@ test: multi_index_statements
|
|||
test: multi_alter_table_statements
|
||||
test: multi_alter_table_add_constraints
|
||||
|
||||
# ----------
|
||||
# multi_create_schema tests creation, loading, and querying of a table in a new
|
||||
# schema (namespace).
|
||||
# ----------
|
||||
test: multi_create_schema
|
||||
|
||||
# ----------
|
||||
# Tests to check if we inform the user about potential caveats of creating new
|
||||
# databases, schemas, roles, and authentication information.
|
||||
|
|
|
@ -50,7 +50,7 @@ test: set_operation_and_local_tables
|
|||
test: subqueries_deep subquery_view subquery_partitioning subqueries_not_supported
|
||||
test: subquery_in_targetlist subquery_in_where subquery_complex_target_list subquery_append
|
||||
test: subquery_prepared_statements
|
||||
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
|
||||
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins
|
||||
test: cte_inline recursive_view_local_table values
|
||||
test: pg13 pg12
|
||||
# run pg14 sequentially as it syncs metadata
|
||||
|
|
|
@ -53,7 +53,6 @@ test: insert_select_connection_leak
|
|||
# ----------
|
||||
test: subquery_basics subquery_local_tables subquery_executors set_operations set_operation_and_local_tables
|
||||
test: subquery_partitioning subquery_complex_target_list subqueries_not_supported
|
||||
test: non_colocated_join_order
|
||||
test: subquery_prepared_statements pg12 cte_inline
|
||||
|
||||
# ----------
|
||||
|
@ -114,8 +113,6 @@ test: with_executors with_partitioning with_dml
|
|||
# ----------
|
||||
# Tests to check our large record loading and shard deletion behavior
|
||||
# ----------
|
||||
test: multi_load_large_records
|
||||
test: multi_master_delete_protocol
|
||||
test: multi_shard_modify
|
||||
|
||||
# ----------
|
||||
|
|
|
@@ -59,7 +59,7 @@ test: multi_partitioning_utils
# ----------
test: subquery_local_tables subquery_executors subquery_and_cte set_operations set_operation_and_local_tables
test: subqueries_deep subquery_view subquery_partitioning subqueries_not_supported subquery_in_where
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins
test: subquery_prepared_statements pg12 cte_inline

# ----------
@@ -122,8 +122,6 @@ test: with_executors with_partitioning with_dml
# ----------
# Tests to check our large record loading and shard deletion behavior
# ----------
test: multi_load_large_records
test: multi_master_delete_protocol
test: multi_shard_modify

# ----------
@@ -131,12 +129,6 @@ test: multi_shard_modify
# ----------
test: multi_alter_table_add_constraints

# ----------
# multi_create_schema tests creation, loading, and querying of a table in a new
# schema (namespace).
# ----------
test: multi_create_schema

# ----------
# Tests to check the sequential and parallel executions of DDL and modification
# commands
@@ -39,7 +39,8 @@ SELECT create_distributed_table('aggregate_type', 'float_value', 'append');

(1 row)

\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
SELECT master_create_empty_shard('aggregate_type') AS shardid \gset
copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid);
-- Test conversions using aggregates on floats and division
SELECT min(float_value), max(float_value),
       sum(float_value), count(float_value), avg(float_value)
File diff suppressed because it is too large
@@ -25,6 +25,8 @@ SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number

(1 row)

SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid1 \gset
SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid2 \gset
CREATE TABLE multi_append_table_to_shard_right_reference_hash
(
    right_number INTEGER not null,
@@ -45,8 +47,8 @@ SELECT set_config('citus.shard_replication_factor', '2', false);
 2
(1 row)

\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid1);
copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid2);
-- Place 'right' table on both workers
\copy multi_append_table_to_shard_right_reference FROM '@abs_srcdir@/data/agg.data'
-- Reset shard replication factor to ensure tasks will be assigned to both workers
@@ -81,7 +83,7 @@ SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage'
FROM
    pg_dist_shard
WHERE 'multi_append_table_to_shard_right_reference_hash'::regclass::oid = logicalrelid;
ERROR:  cannot append to shardId 230001
ERROR:  cannot append to shardId 230003
DETAIL:  We currently don't support appending to shards in hash-partitioned, reference and local tables
-- Clean up after test
DROP TABLE multi_append_table_to_shard_stage;
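For context, a sketch of how the error above is produced: master_append_table_to_shard rejects shards of hash-partitioned (as well as reference and local) tables. All names below are hypothetical, and the node name/port are placeholders:

CREATE TABLE staging_table (a int);
CREATE TABLE hash_target (a int);
SELECT create_distributed_table('hash_target', 'a');  -- hash-distributed by default
-- appending a staging table into any shard of the hash table fails
SELECT master_append_table_to_shard(shardid, 'staging_table', 'localhost', 5432)
FROM pg_dist_shard
WHERE logicalrelid = 'hash_target'::regclass LIMIT 1;
-- ERROR:  cannot append to shardId ...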
File diff suppressed because it is too large
@@ -1,20 +0,0 @@
SET citus.next_shard_id TO 250000;
CREATE SCHEMA tpch
CREATE TABLE nation (
    n_nationkey integer not null,
    n_name char(25) not null,
    n_regionkey integer not null,
    n_comment varchar(152));
SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
 create_distributed_table
--------------------------

(1 row)

\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
SELECT count(*) from tpch.nation;
 count
-------
    25
(1 row)
@@ -13,9 +13,9 @@ SET citus.next_shard_id TO 290000;
\copy orders_reference FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
\copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|'
\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009)
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
\copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
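A note on the literal shard ids (360006, 360009) above: regression tests make master_create_empty_shard return predictable ids by pinning citus.next_shard_id beforehand, so later statements can name the shard directly. A minimal sketch of that idiom (the exact id returned is an assumption that depends on cluster state):

SET citus.next_shard_id TO 360006;
SELECT master_create_empty_shard('customer_append');  -- returns shard id 360006 here
\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)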
@@ -1,26 +0,0 @@
--
-- MULTI_STAGE_LARGE_RECORDS
--
-- Tests for loading data with large records (i.e. greater than the read buffer
-- size, which is 32kB) in a distributed cluster. These tests make sure that we
-- are creating shards of correct size even when records are large.
SET citus.next_shard_id TO 300000;
SET citus.shard_max_size TO "256kB";
CREATE TABLE large_records_table (data_id integer, data text);
SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
 master_create_distributed_table
---------------------------------

(1 row)

\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
    WHERE pg_class.oid=logicalrelid AND relname='large_records_table'
    ORDER BY shardid;
 shardminvalue | shardmaxvalue
---------------+---------------
 1             | 1
 2             | 2
(2 rows)

RESET citus.shard_max_size;
@@ -8,9 +8,12 @@ SET citus.next_shard_id TO 280000;
\copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
\copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
\copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\copy part_append FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
SELECT master_create_empty_shard('customer_append') AS shardid2 \gset
copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1);
copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2);
SELECT master_create_empty_shard('part_append') AS shardid \gset
copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid);
-- Exchange partition files in binary format in remaining tests
ALTER SYSTEM SET citus.binary_worker_copy_format TO on;
SELECT pg_reload_conf();
@@ -20,15 +23,15 @@ SELECT pg_reload_conf();
(1 row)

SELECT success FROM run_command_on_workers('ALTER SYSTEM SET citus.binary_worker_copy_format TO on');
 success
---------
 success
---------------------------------------------------------------------
 t
 t
(2 rows)

SELECT success FROM run_command_on_workers('SELECT pg_reload_conf()');
 success
---------
 success
---------------------------------------------------------------------
 t
 t
(2 rows)
@@ -1,118 +0,0 @@
//
// How we organize this isolation test spec is explained in the README.md file in this directory.
//

// create append distributed table to test behavior of COPY in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    CREATE TABLE append_copy(id integer, data text, int_data int);
    SELECT create_distributed_table('append_copy', 'id', 'append');
}

// drop distributed table
teardown
{
    DROP TABLE IF EXISTS append_copy CASCADE;
}

// session 1
session "s1"
step "s1-initialize" { COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-begin" { BEGIN; }
step "s1-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s1-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
step "s1-router-select" { SELECT * FROM append_copy WHERE id = 1; }
step "s1-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; }
step "s1-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s1-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); }
step "s1-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; }
step "s1-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; }
step "s1-delete" { DELETE FROM append_copy WHERE id = 1; }
step "s1-truncate" { TRUNCATE append_copy; }
step "s1-drop" { DROP TABLE append_copy; }
step "s1-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); }
step "s1-ddl-drop-index" { DROP INDEX append_copy_index; }
step "s1-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; }
step "s1-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; }
step "s1-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; }
step "s1-table-size" { SELECT citus_total_relation_size('append_copy'); }
step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
step "s1-create-non-distributed-table" { CREATE TABLE append_copy(id integer, data text, int_data int); }
step "s1-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); }
step "s1-select-count" { SELECT COUNT(*) FROM append_copy; }
step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); }
step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
step "s1-commit" { COMMIT; }

// session 2
session "s2"
step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
step "s2-router-select" { SELECT * FROM append_copy WHERE id = 1; }
step "s2-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; }
step "s2-adaptive-select"
{
    SET citus.enable_repartition_joins TO ON;
    SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
}
step "s2-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); }
step "s2-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; }
step "s2-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; }
step "s2-delete" { DELETE FROM append_copy WHERE id = 1; }
step "s2-truncate" { TRUNCATE append_copy; }
step "s2-drop" { DROP TABLE append_copy; }
step "s2-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); }
step "s2-ddl-drop-index" { DROP INDEX append_copy_index; }
step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); }
step "s2-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; }
step "s2-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; }
step "s2-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; }
step "s2-table-size" { SELECT citus_total_relation_size('append_copy'); }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); }

// permutations - COPY vs COPY
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"

// permutations - COPY first
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-adaptive-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns"
permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count"

// permutations - COPY second
permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-adaptive-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes"
permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns"
permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns"
permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns"
permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s2-copy" "s1-commit" "s1-select-count"
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count"
@@ -1,12 +1,15 @@
setup
{
    SET citus.next_shard_id TO 4080102;

    CREATE TABLE table_to_append(id int);
    CREATE TABLE table_to_be_appended(id int);

    SELECT create_distributed_table('table_to_append', 'id', 'append');
    SELECT master_create_empty_shard('table_to_append');
    INSERT INTO table_to_be_appended SELECT generate_series(1,1000);

    COPY table_to_append FROM PROGRAM 'echo 0 && echo 7 && echo 8 && echo 9 && echo 10000';
    COPY table_to_append FROM PROGRAM 'echo 0 && echo 7 && echo 8 && echo 9 && echo 10000' WITH (append_to_shard 4080102);
}

teardown
@@ -2,12 +2,17 @@
// How we organize this isolation test spec is explained in the README.md file in this directory.
//

// create append distributed table to test behavior of COPY in concurrent operations
// create range distributed table to test behavior of COPY in concurrent operations
setup
{
    SET citus.shard_replication_factor TO 1;
    SET citus.next_shard_id TO 3004005;
    CREATE TABLE range_copy(id integer, data text, int_data int);
    SELECT create_distributed_table('range_copy', 'id', 'append');
    SELECT create_distributed_table('range_copy', 'id', 'range');
    SELECT master_create_empty_shard('range_copy');
    SELECT master_create_empty_shard('range_copy');
    UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
    UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
}

// drop distributed table
@@ -76,7 +81,13 @@ step "s2-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column;
step "s2-table-size" { SELECT citus_total_relation_size('range_copy'); }
step "s2-master-modify-multiple-shards" { DELETE FROM range_copy; }
step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); }
step "s2-distribute-table" {
    SET citus.shard_replication_factor TO 1;
    SET citus.next_shard_id TO 3004005;
    SELECT create_distributed_table('range_copy', 'id', 'range');
    UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
    UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
}

// permutations - COPY vs COPY
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
@@ -9,8 +9,10 @@ setup
    SELECT citus_internal.refresh_isolation_tester_prepared_statement();

    SET citus.shard_replication_factor TO 1;
    SET citus.next_shard_id TO 6780300;
    CREATE TABLE select_append(id integer, data text, int_data int);
    SELECT create_distributed_table('select_append', 'id', 'append');
    SELECT master_create_empty_shard('select_append');
}

// drop distributed table
@@ -22,7 +24,7 @@ teardown

// session 1
session "s1"
step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard 6780300); }
step "s1-begin" { BEGIN; }

step "s1-disable-binary-protocol" {
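Note how the spec makes the literal shard id in the step work: the setup pins citus.next_shard_id to 6780300 before calling master_create_empty_shard, so s1-initialize can reference that shard directly. Sketched outside the isolation framework (assumption: a fresh shard id space so the pinned id is actually assigned):

SET citus.next_shard_id TO 6780300;
SELECT master_create_empty_shard('select_append');  -- creates shard 6780300
COPY select_append FROM PROGRAM 'echo 0, a, 0' WITH (format 'csv', append_to_shard 6780300);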
@@ -9,8 +9,10 @@ setup
    SELECT citus_internal.refresh_isolation_tester_prepared_statement();

    SET citus.shard_replication_factor TO 1;
    SET citus.next_shard_id TO 5990340;
    CREATE TABLE truncate_append(id integer, data text);
    SELECT create_distributed_table('truncate_append', 'id', 'append');
    SELECT master_create_empty_shard('truncate_append');
}

// drop distributed table
@@ -23,7 +25,7 @@ teardown

// session 1
session "s1"
step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard 5990340); }
step "s1-begin" { BEGIN; }
step "s1-truncate" { TRUNCATE truncate_append; }
step "s1-drop" { DROP TABLE truncate_append; }
@@ -10,10 +10,8 @@
/multi_behavioral_analytics_create_table_superuser.sql
/multi_complex_count_distinct.sql
/multi_copy.sql
/multi_create_schema.sql
/multi_load_data.sql
/multi_load_data_superuser.sql
/multi_load_large_records.sql
/multi_load_more_data.sql
/multi_mx_copy_data.sql
/multi_outer_join.sql
@@ -63,10 +63,12 @@ ORDER BY 2, 3;
-- here we update shardlength, shardminvalue and shardmaxvalue
CREATE TABLE test_table_statistics_append (id int);
SELECT create_distributed_table('test_table_statistics_append', 'id', 'append');
COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH CSV;
COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH CSV;
SELECT master_create_empty_shard('test_table_statistics_append') AS shardid1 \gset
SELECT master_create_empty_shard('test_table_statistics_append') AS shardid2 \gset
COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH (format 'csv', append_to_shard :shardid1);
COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH (format 'csv', append_to_shard :shardid2);

-- originally shardminvalue and shardmaxvalue will be 0,3 and 4, 7
-- shardminvalue and shardmaxvalue are NULL
SELECT
    ds.logicalrelid::regclass::text AS tablename,
    ds.shardid AS shardid,
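The comment change above ('shardminvalue and shardmaxvalue are NULL') reflects that COPY ... WITH (append_to_shard ...) leaves the new shard's min/max statistics unset. A hedged sketch of how they could be refreshed and inspected afterwards, assuming Citus's citus_update_table_statistics() UDF (an assumption; the function is not shown in this hunk):

SELECT citus_update_table_statistics('test_table_statistics_append');
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'test_table_statistics_append'::regclass
ORDER BY shardid;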
@@ -85,7 +85,8 @@ FROM
WHERE
    logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
                     'sensors_2001'::regclass, 'sensors_2002'::regclass,
                     'sensors_2003'::regclass, 'sensors_2004'::regclass);
                     'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;

-- show that all the tables prune to the same shard for the same distribution key
WITH
@@ -190,7 +191,8 @@ FROM
WHERE
    logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
                     'sensors_2001'::regclass, 'sensors_2002'::regclass,
                     'sensors_2003'::regclass, 'sensors_2004'::regclass);
                     'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;

\c - - - :worker_1_port
SET search_path TO drop_column_partitioned_table;
@@ -201,7 +203,8 @@ FROM
WHERE
    logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
                     'sensors_2001'::regclass, 'sensors_2002'::regclass,
                     'sensors_2003'::regclass, 'sensors_2004'::regclass);
                     'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;

\c - - - :master_port
SET client_min_messages TO WARNING;
@@ -67,6 +67,7 @@ CREATE TABLE products_append (
);

SELECT create_distributed_table('products_append', 'product_no', 'append');
SELECT master_create_empty_shard('products_append') AS shardid \gset

-- Can only add primary key constraint on distribution column (or group
-- of columns including distribution column)
@@ -75,7 +76,7 @@ ALTER TABLE products_append ADD CONSTRAINT p_key_name PRIMARY KEY(name);
ALTER TABLE products_append ADD CONSTRAINT p_key PRIMARY KEY(product_no);

-- Error out since first and third rows have the same product_no
\COPY products_append FROM STDIN DELIMITER AS ',';
COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
1, Product_1, 10
2, Product_2, 15
1, Product_3, 8
@@ -138,6 +139,7 @@ DROP TABLE unique_test_table_ref;
-- Check "UNIQUE CONSTRAINT" with append table
CREATE TABLE unique_test_table_append(id int, name varchar(20));
SELECT create_distributed_table('unique_test_table_append', 'id', 'append');
SELECT master_create_empty_shard('unique_test_table_append') AS shardid \gset

-- Can only add unique constraint on distribution column (or group
-- of columns including distribution column)
@@ -146,7 +148,7 @@ ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_name UNIQUE(name);
ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_id UNIQUE(id);

-- Error out. Table cannot have two rows with the same id.
\COPY unique_test_table_append FROM STDIN DELIMITER AS ',';
COPY unique_test_table_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
1, Product_1
2, Product_2
1, Product_3
@@ -207,13 +209,14 @@ CREATE TABLE products_append (
);

SELECT create_distributed_table('products_append', 'product_no', 'append');
SELECT master_create_empty_shard('products_append') AS shardid \gset

-- Can add column and table check constraints
ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0);
ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price);

-- Error out, since the third row conflicts with the p_multi_check
\COPY products_append FROM STDIN DELIMITER AS ',';
COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
1, Product_1, 10, 5
2, Product_2, 15, 8
1, Product_3, 8, 10
@@ -277,6 +280,7 @@ CREATE TABLE products_append (
);

SELECT create_distributed_table('products_append', 'product_no','append');
SELECT master_create_empty_shard('products_append') AS shardid \gset

-- Can only add exclusion constraint on distribution column (or group of columns
-- including distribution column)
@@ -285,7 +289,7 @@ ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name wi
ALTER TABLE products_append ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =);

-- Error out since the first and third rows cannot pass the exclusion check.
\COPY products_append FROM STDIN DELIMITER AS ',';
COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
1, Product_1, 10
1, Product_2, 15
1, Product_1, 8
@@ -335,11 +339,12 @@ CREATE TABLE products_append (
);

SELECT create_distributed_table('products_append', 'product_no', 'append');
SELECT master_create_empty_shard('products_append') AS shardid \gset

ALTER TABLE products_append ALTER COLUMN name SET NOT NULL;

-- Error out since the name and product_no columns cannot handle NULL values.
\COPY products_append FROM STDIN DELIMITER AS ',';
COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
1, \N, 10
\N, Product_2, 15
1, Product_1, 8
@@ -98,6 +98,7 @@ CREATE TABLE customer_append (
    c_mktsegment char(10) not null,
    c_comment varchar(117) not null);
SELECT create_distributed_table('customer_append', 'c_custkey', 'append');
SELECT master_create_empty_shard('customer_append');

CREATE TABLE nation (
    n_nationkey integer not null,

@@ -130,6 +131,7 @@ CREATE TABLE part_append (
    p_retailprice decimal(15,2) not null,
    p_comment varchar(23) not null);
SELECT create_distributed_table('part_append', 'p_partkey', 'append');
SELECT master_create_empty_shard('part_append');

CREATE TABLE supplier
(
@@ -4,24 +4,35 @@

-- This test checks that we can handle null min/max values in shard statistics
-- and that we don't partition or join prune shards that have null values.
CREATE SCHEMA multi_null_minmax_value_pruning;
SET search_path TO multi_null_minmax_value_pruning;

SET client_min_messages TO DEBUG2;
SET citus.explain_all_tasks TO on;
-- to avoid differing explain output - executor doesn't matter,
-- because we're testing pruning here.

-- Change configuration to treat lineitem and orders tables as large

SET citus.log_multi_join_order to true;
SET citus.enable_repartition_joins to ON;

SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
SET citus.next_shard_id = 290000;

CREATE TABLE lineitem (LIKE public.lineitem);
SELECT create_distributed_table('lineitem', 'l_orderkey', 'range');
SELECT master_create_empty_shard('lineitem') as lineitem_shardid1 \gset
SELECT master_create_empty_shard('lineitem') as lineitem_shardid2 \gset

CREATE TABLE orders (LIKE public.orders);
SELECT create_distributed_table('orders', 'o_orderkey', 'range');
SELECT master_create_empty_shard('orders') as orders_shardid1 \gset
SELECT master_create_empty_shard('orders') as orders_shardid2 \gset

SET client_min_messages TO DEBUG2;

UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000' WHERE shardid = :lineitem_shardid1 OR shardid = :orders_shardid1;
UPDATE pg_dist_shard SET shardminvalue = '6001', shardmaxvalue = '20000' WHERE shardid = :lineitem_shardid2 OR shardid = :orders_shardid2;
UPDATE pg_dist_partition SET colocationid = 87091 WHERE logicalrelid = 'orders'::regclass OR logicalrelid = 'lineitem'::regclass;

-- Check that partition and join pruning works when min/max values exist
-- Adding l_orderkey = 1 to make the query not router executable
SELECT coordinator_plan($Q$
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
$Q$);
@@ -34,9 +45,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-- partition or join pruning for the shard with null min value. Since it is not
-- supported with single-repartition join, dual-repartition has been used.

UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = :lineitem_shardid1;

SELECT coordinator_plan($Q$
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
$Q$);
@@ -49,9 +60,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-- don't apply partition or join pruning for this other shard either. Since it
-- is not supported with single-repartition join, dual-repartition has been used.

UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = :lineitem_shardid2;

SELECT coordinator_plan($Q$
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
$Q$);
@@ -64,9 +75,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
-- should apply partition and join pruning for this shard now. Since it is not
-- supported with single-repartition join, dual-repartition has been used.

UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = :lineitem_shardid1;

SELECT coordinator_plan($Q$
SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
$Q$);
@@ -75,9 +86,5 @@ EXPLAIN (COSTS FALSE)
SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
    WHERE l_partkey = o_custkey;

-- Set minimum and maximum values for two shards back to their original values

UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001;

SET client_min_messages TO NOTICE;
RESET client_min_messages;
DROP SCHEMA multi_null_minmax_value_pruning CASCADE;
@@ -52,14 +52,14 @@ FROM
    orders, customer_append
WHERE
    o_custkey = c_custkey AND
    c_custkey < 0;
    c_custkey < 0 AND c_custkey > 0;
SELECT
    count(*)
FROM
    orders, customer_append
WHERE
    o_custkey = c_custkey AND
    c_custkey < 0;
    c_custkey < 0 AND c_custkey > 0;

-- Dual hash-repartition join test case. Note that this query doesn't produce
-- meaningful results and is only to test hash-partitioning of two large tables
|
@ -82,9 +82,10 @@ CREATE TABLE nation_append_search_path(
|
|||
n_regionkey integer not null,
|
||||
n_comment varchar(152)
|
||||
);
|
||||
SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
|
||||
SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
|
||||
SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset
|
||||
|
||||
\copy nation_append_search_path FROM STDIN with delimiter '|';
|
||||
copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid);
|
||||
0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
|
||||
1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
|
||||
2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
|
||||
|
|
|
@@ -1,70 +0,0 @@
--
-- NON_COLOCATED_JOIN_ORDER
--

-- Tests to check that shard placements must be equal for the local join logic to be chosen.

CREATE TABLE test_table_1(id int, value_1 int);
SELECT master_create_distributed_table('test_table_1', 'id', 'append');

\copy test_table_1 FROM STDIN DELIMITER ','
1,2
2,3
3,4
\.

\copy test_table_1 FROM STDIN DELIMITER ','
5,2
6,3
7,4
\.

CREATE TABLE test_table_2(id int, value_1 int);
SELECT master_create_distributed_table('test_table_2', 'id', 'append');

\copy test_table_2 FROM STDIN DELIMITER ','
1,2
2,3
3,4
\.

\copy test_table_2 FROM STDIN DELIMITER ','
5,2
6,3
7,4
\.

SET citus.log_multi_join_order to TRUE;
SET client_min_messages to DEBUG1;
SET citus.enable_repartition_joins TO on;

-- when joining append tables we always get dual re-partition joins
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;

-- Add two shard placements of interval [8,10] to test_table_1
SET citus.shard_replication_factor to 2;

\copy test_table_1 FROM STDIN DELIMITER ','
8,2
9,3
10,4
\.

-- Add two shard placements of interval [8,10] to test_table_2
SET citus.shard_replication_factor to 1;

\copy test_table_2 FROM STDIN DELIMITER ','
8,2
9,3
10,4
\.

-- Although the shard intervals of the relations are the same, the repartition join
-- logic will be triggered since they have a different number of placements for the
-- interval [8,10].
SET citus.enable_repartition_joins to ON;
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;

SET client_min_messages TO default;

DROP TABLE test_table_1;
DROP TABLE test_table_2;
@@ -365,6 +365,13 @@ BEGIN;
INSERT INTO test SELECT i,i FROM generate_series(0,100)i;
ROLLBACK;

-- master_create_empty_shard on coordinator
BEGIN;
CREATE TABLE append_table (a INT, b INT);
SELECT create_distributed_table('append_table','a','append');
SELECT master_create_empty_shard('append_table');
END;

-- alter table inside a tx block
BEGIN;
ALTER TABLE test ADD COLUMN z single_node.new_type;
|
@ -5,14 +5,15 @@ CREATE TABLE append_table (key text, value int, extra int default 0);
|
|||
CREATE INDEX ON append_table (key);
|
||||
|
||||
SELECT create_distributed_table('append_table', 'key', 'append');
|
||||
SELECT 1 FROM master_create_empty_shard('append_table');
|
||||
SELECT 1 FROM master_create_empty_shard('append_table');
|
||||
SELECT master_create_empty_shard('append_table') AS shardid1 \gset
|
||||
SELECT master_create_empty_shard('append_table') AS shardid2 \gset
|
||||
SELECT master_create_empty_shard('append_table') AS shardid3 \gset
|
||||
|
||||
CREATE TABLE ref_table (value int);
|
||||
CREATE INDEX ON ref_table (value);
|
||||
SELECT create_reference_table('ref_table');
|
||||
|
||||
\COPY append_table (key,value) FROM STDIN WITH CSV
|
||||
COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid1);
|
||||
abc,234
|
||||
bcd,123
|
||||
bcd,234
|
||||
|
@@ -21,7 +22,7 @@ def,456
efg,234
\.

\COPY append_table (key,value) FROM STDIN WITH CSV
COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid2);
abc,123
efg,123
hij,123
@@ -30,7 +31,7 @@ ijk,1
jkl,0
\.

\COPY ref_table FROM STDIN WITH CSV
COPY ref_table FROM STDIN WITH CSV;
123
234
345
|
@ -99,22 +99,24 @@ INSERT INTO t3 VALUES (3, 33);
|
|||
SELECT * FROM t3 ORDER BY a;
|
||||
|
||||
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
|
||||
WHERE logicalrelid = 't_append'::regclass
|
||||
WHERE logicalrelid = 't_range'::regclass
|
||||
ORDER BY shardminvalue, shardmaxvalue;
|
||||
|
||||
SELECT * FROM t_append ORDER BY id;
|
||||
SELECT * FROM t_range ORDER BY id;
|
||||
|
||||
\copy t_append FROM STDIN DELIMITER ','
|
||||
SELECT master_create_empty_shard('t_range') AS new_shard_id \gset
|
||||
UPDATE pg_dist_shard SET shardminvalue = '9', shardmaxvalue = '11' WHERE shardid = :new_shard_id;
|
||||
\copy t_range FROM STDIN with (DELIMITER ',')
|
||||
9,2
|
||||
10,3
|
||||
11,4
|
||||
\.
|
||||
|
||||
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
|
||||
WHERE logicalrelid = 't_append'::regclass
|
||||
WHERE logicalrelid = 't_range'::regclass
|
||||
ORDER BY shardminvalue, shardmaxvalue;
|
||||
|
||||
SELECT * FROM t_append ORDER BY id;
|
||||
SELECT * FROM t_range ORDER BY id;
|
||||
|
||||
|
||||
ROLLBACK;
|
||||
|
|
|
@ -50,16 +50,20 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex
|
|||
RETURNS void
|
||||
AS 'citus', $$master_create_worker_shards$$
|
||||
LANGUAGE C STRICT;
|
||||
CREATE TABLE t_append(id int, value_1 int);
|
||||
SELECT master_create_distributed_table('t_append', 'id', 'append');
|
||||
CREATE TABLE t_range(id int, value_1 int);
|
||||
SELECT create_distributed_table('t_range', 'id', 'range');
|
||||
SELECT master_create_empty_shard('t_range') as shardid1 \gset
|
||||
SELECT master_create_empty_shard('t_range') as shardid2 \gset
|
||||
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid1;
|
||||
UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2;
|
||||
|
||||
\copy t_append FROM STDIN DELIMITER ','
|
||||
\copy t_range FROM STDIN with (DELIMITER ',')
|
||||
1,2
|
||||
2,3
|
||||
3,4
|
||||
\.
|
||||
|
||||
\copy t_append FROM STDIN DELIMITER ','
|
||||
\copy t_range FROM STDIN with (DELIMITER ',')
|
||||
5,2
|
||||
6,3
|
||||
7,4
|
||||
|
|
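The converted tests above show the range-distribution replacement for the old append workflow: shards are created empty, then given non-overlapping bounds by hand before data is loaded. A minimal standalone sketch of the same pattern (table name and bounds are hypothetical):

CREATE TABLE r_table (id int, value_1 int);
SELECT create_distributed_table('r_table', 'id', 'range');
-- create an empty shard and assign its key range manually
SELECT master_create_empty_shard('r_table') AS shardid \gset
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid;
\copy r_table FROM STDIN with (DELIMITER ',')
1,2
2,3
3,4
\.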