Merge pull request #5399 from citusdata/marcocitus/remove-append-copy

Marco Slot 2021-11-07 21:09:26 +01:00 committed by GitHub
commit ee0cd75648
63 changed files with 1649 additions and 2983 deletions


@ -117,6 +117,9 @@
/* constant used in binary protocol */
static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";
/* custom Citus option for appending to a shard */
#define APPEND_TO_SHARD_OPTION "append_to_shard"
/*
* Data size threshold to switch over the active placement for a connection.
* If this is too low, overhead of starting COPY commands will hurt the
@ -239,11 +242,6 @@ typedef enum LocalCopyStatus
/* Local functions forward declarations */
static void CopyToExistingShards(CopyStmt *copyStatement,
QueryCompletionCompat *completionTag);
static void CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag,
Oid relationId);
static void OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
ShardConnections *shardConnections,
bool useBinaryCopyFormat);
static List * RemoveOptionFromList(List *optionList, char *optionName);
static bool BinaryOutputFunctionDefined(Oid typeId);
static bool BinaryInputFunctionDefined(Oid typeId);
@ -257,9 +255,6 @@ static void SendCopyDataToPlacement(StringInfo dataBuffer, int64 shardId,
MultiConnection *connection);
static void ReportCopyError(MultiConnection *connection, PGresult *result);
static uint32 AvailableColumnCount(TupleDesc tupleDescriptor);
static int64 StartCopyToNewShard(ShardConnections *shardConnections,
CopyStmt *copyStatement, bool useBinaryCopyFormat);
static int64 CreateEmptyShard(char *relationName);
static Oid TypeForColumnName(Oid relationId, TupleDesc tupleDescriptor, char *columnName);
static Oid * TypeArrayFromTupleDescriptor(TupleDesc tupleDescriptor);
@ -332,6 +327,7 @@ static void RemovePlacementStateFromCopyConnectionStateBuffer(CopyConnectionStat
connectionState,
CopyPlacementState *
placementState);
static uint64 ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement);
static uint64 ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues,
bool *columnNulls);
@ -403,14 +399,11 @@ CitusCopyFrom(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
if (IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) ||
IsCitusTableTypeCacheEntry(cacheEntry, RANGE_DISTRIBUTED) ||
IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED) ||
IsCitusTableTypeCacheEntry(cacheEntry, CITUS_TABLE_WITH_NO_DIST_KEY))
{
CopyToExistingShards(copyStatement, completionTag);
}
else if (IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED))
{
CopyToNewShards(copyStatement, completionTag, relationId);
}
else
{
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@ -508,6 +501,14 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
CitusCopyDestReceiver *copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList,
partitionColumnIndex,
executorState, NULL);
/* if the user specified an explicit append_to_shard option, write to it */
uint64 appendShardId = ProcessAppendToShardOption(tableId, copyStatement);
if (appendShardId != INVALID_SHARD_ID)
{
copyDest->appendShardId = appendShardId;
}
DestReceiver *dest = (DestReceiver *) copyDest;
dest->rStartup(dest, 0, tupleDescriptor);
@ -609,196 +610,6 @@ CopyToExistingShards(CopyStmt *copyStatement, QueryCompletionCompat *completionT
}
/*
* CopyToNewShards implements the COPY table_name FROM ... for append-partitioned
* tables where we create new shards into which to copy rows.
*/
static void
CopyToNewShards(CopyStmt *copyStatement, QueryCompletionCompat *completionTag, Oid
relationId)
{
/* allocate column values and nulls arrays */
Relation distributedRelation = table_open(relationId, RowExclusiveLock);
TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation);
uint32 columnCount = tupleDescriptor->natts;
Datum *columnValues = palloc0(columnCount * sizeof(Datum));
bool *columnNulls = palloc0(columnCount * sizeof(bool));
EState *executorState = CreateExecutorState();
MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState);
ExprContext *executorExpressionContext = GetPerTupleExprContext(executorState);
const char *delimiterCharacter = "\t";
const char *nullPrintCharacter = "\\N";
ErrorContextCallback errorCallback;
int64 currentShardId = INVALID_SHARD_ID;
uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L;
uint64 copiedDataSizeInBytes = 0;
uint64 processedRowCount = 0;
ShardConnections *shardConnections =
(ShardConnections *) palloc0(sizeof(ShardConnections));
/* initialize copy state to read from COPY data source */
CopyFromState copyState = BeginCopyFrom_compat(NULL,
distributedRelation,
NULL,
copyStatement->filename,
copyStatement->is_program,
NULL,
copyStatement->attlist,
copyStatement->options);
CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData));
copyOutState->delim = (char *) delimiterCharacter;
copyOutState->null_print = (char *) nullPrintCharacter;
copyOutState->null_print_client = (char *) nullPrintCharacter;
copyOutState->binary = CanUseBinaryCopyFormat(tupleDescriptor);
copyOutState->fe_msgbuf = makeStringInfo();
copyOutState->rowcontext = executorTupleContext;
FmgrInfo *columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor,
copyOutState->binary);
/* set up callback to identify error line number */
errorCallback.callback = CopyFromErrorCallback;
errorCallback.arg = (void *) copyState;
errorCallback.previous = error_context_stack;
/*
* From here on we use copyStatement as the template for the command
* that we send to workers. This command does not have an attribute
* list since NextCopyFrom will generate a value for all columns.
* We also strip options.
*/
copyStatement->attlist = NIL;
copyStatement->options = NIL;
if (copyOutState->binary)
{
DefElem *binaryFormatOption =
makeDefElem("format", (Node *) makeString("binary"), -1);
copyStatement->options = lappend(copyStatement->options, binaryFormatOption);
}
while (true)
{
ResetPerTupleExprContext(executorState);
/* switch to tuple memory context and start showing line number in errors */
error_context_stack = &errorCallback;
MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext);
/* parse a row from the input */
bool nextRowFound = NextCopyFromCompat(copyState, executorExpressionContext,
columnValues, columnNulls);
if (!nextRowFound)
{
/* switch to regular memory context and stop showing line number in errors */
MemoryContextSwitchTo(oldContext);
error_context_stack = errorCallback.previous;
break;
}
CHECK_FOR_INTERRUPTS();
/* switch to regular memory context and stop showing line number in errors */
MemoryContextSwitchTo(oldContext);
error_context_stack = errorCallback.previous;
/*
* If copied data size is zero, this means either this is the first
* line in the copy or we just filled the previous shard up to its
* capacity. Either way, we need to create a new shard and
* start copying new rows into it.
*/
if (copiedDataSizeInBytes == 0)
{
/* create shard and open connections to shard placements */
currentShardId = StartCopyToNewShard(shardConnections, copyStatement,
copyOutState->binary);
/* send copy binary headers to shard placements */
if (copyOutState->binary)
{
SendCopyBinaryHeaders(copyOutState, currentShardId,
shardConnections->connectionList);
}
}
/* replicate row to shard placements */
resetStringInfo(copyOutState->fe_msgbuf);
AppendCopyRowData(columnValues, columnNulls, tupleDescriptor,
copyOutState, columnOutputFunctions, NULL);
SendCopyDataToAll(copyOutState->fe_msgbuf, currentShardId,
shardConnections->connectionList);
uint64 messageBufferSize = copyOutState->fe_msgbuf->len;
copiedDataSizeInBytes = copiedDataSizeInBytes + messageBufferSize;
/*
* If we filled up this shard to its capacity, send copy binary footers
* to shard placements, and update shard statistics.
*/
if (copiedDataSizeInBytes > shardMaxSizeInBytes)
{
Assert(currentShardId != INVALID_SHARD_ID);
if (copyOutState->binary)
{
SendCopyBinaryFooters(copyOutState, currentShardId,
shardConnections->connectionList);
}
EndRemoteCopy(currentShardId, shardConnections->connectionList);
UpdateShardStatistics(shardConnections->shardId);
copiedDataSizeInBytes = 0;
currentShardId = INVALID_SHARD_ID;
}
processedRowCount += 1;
#if PG_VERSION_NUM >= PG_VERSION_14
pgstat_progress_update_param(PROGRESS_COPY_TUPLES_PROCESSED, processedRowCount);
#endif
}
/*
* For the last shard, send copy binary footers to shard placements,
* and update shard statistics. If no row was sent, there is no shard
* to finalize the copy command.
*/
if (copiedDataSizeInBytes > 0)
{
Assert(currentShardId != INVALID_SHARD_ID);
if (copyOutState->binary)
{
SendCopyBinaryFooters(copyOutState, currentShardId,
shardConnections->connectionList);
}
EndRemoteCopy(currentShardId, shardConnections->connectionList);
UpdateShardStatistics(shardConnections->shardId);
}
EndCopyFrom(copyState);
table_close(distributedRelation, NoLock);
/* check for cancellation one last time before returning */
CHECK_FOR_INTERRUPTS();
if (completionTag != NULL)
{
CompleteCopyQueryTagCompat(completionTag, processedRowCount);
}
}
static void
CompleteCopyQueryTagCompat(QueryCompletionCompat *completionTag, uint64 processedRowCount)
{
@ -839,105 +650,6 @@ RemoveOptionFromList(List *optionList, char *optionName)
}
/*
* OpenCopyConnectionsForNewShards opens a connection for each placement of a shard and
* starts a COPY transaction if necessary. If a connection cannot be opened,
* then the transaction is rolled back.
*/
static void
OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
ShardConnections *shardConnections,
bool useBinaryCopyFormat)
{
int failedPlacementCount = 0;
ListCell *placementCell = NULL;
List *connectionList = NULL;
int64 shardId = shardConnections->shardId;
bool raiseInterrupts = true;
MemoryContext localContext =
AllocSetContextCreateExtended(CurrentMemoryContext,
"OpenCopyConnectionsForNewShards",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/* release active placement list at the end of this function */
MemoryContext oldContext = MemoryContextSwitchTo(localContext);
List *activePlacementList = ActiveShardPlacementList(shardId);
MemoryContextSwitchTo(oldContext);
foreach(placementCell, activePlacementList)
{
ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell);
char *nodeUser = CurrentUserName();
uint32 connectionFlags = FOR_DML;
/*
* For hash partitioned tables, connection establishment happens in
* CopyGetPlacementConnection().
*/
Assert(placement->partitionMethod != DISTRIBUTE_BY_HASH);
MultiConnection *connection = GetPlacementConnection(connectionFlags, placement,
nodeUser);
/*
* This code-path doesn't support optional connections, so we don't expect
* NULL connections.
*/
Assert(connection != NULL);
if (PQstatus(connection->pgConn) != CONNECTION_OK)
{
ReportConnectionError(connection, ERROR);
}
/*
* Errors are supposed to cause immediate aborts (i.e. we don't
* want to/can't invalidate placements), mark the connection as
* critical so later errors cause failures.
*/
MarkRemoteTransactionCritical(connection);
ClaimConnectionExclusively(connection);
RemoteTransactionBeginIfNecessary(connection);
StringInfo copyCommand = ConstructCopyStatement(copyStatement,
shardConnections->shardId);
if (!SendRemoteCommand(connection, copyCommand->data))
{
ReportConnectionError(connection, ERROR);
}
PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts);
if (PQresultStatus(result) != PGRES_COPY_IN)
{
ReportResultError(connection, result, ERROR);
}
PQclear(result);
connectionList = lappend(connectionList, connection);
}
/* if all placements failed, error out */
if (failedPlacementCount == list_length(activePlacementList))
{
ereport(ERROR, (errmsg("could not connect to any active placements")));
}
/*
* We should just error out and code execution should
* never reach this point. This is the case for all tables.
*/
Assert(failedPlacementCount == 0);
shardConnections->connectionList = connectionList;
MemoryContextReset(localContext);
}
/*
* CanUseBinaryCopyFormat iterates over columns of the relation and looks for a
* column whose type is array of user-defined type or composite type. If it finds
@ -1830,48 +1542,6 @@ AppendCopyBinaryFooters(CopyOutState footerOutputState)
}
/*
* StartCopyToNewShard creates a new shard and related shard placements and
* opens connections to shard placements.
*/
static int64
StartCopyToNewShard(ShardConnections *shardConnections, CopyStmt *copyStatement,
bool useBinaryCopyFormat)
{
char *relationName = copyStatement->relation->relname;
char *schemaName = copyStatement->relation->schemaname;
char *qualifiedName = quote_qualified_identifier(schemaName, relationName);
int64 shardId = CreateEmptyShard(qualifiedName);
shardConnections->shardId = shardId;
shardConnections->connectionList = NIL;
/* connect to shards placements and start transactions */
OpenCopyConnectionsForNewShards(copyStatement, shardConnections,
useBinaryCopyFormat);
return shardId;
}
/*
* CreateEmptyShard creates a new shard and related shard placements from the
* local master node.
*/
static int64
CreateEmptyShard(char *relationName)
{
text *relationNameText = cstring_to_text(relationName);
Datum relationNameDatum = PointerGetDatum(relationNameText);
Datum shardIdDatum = DirectFunctionCall1(master_create_empty_shard,
relationNameDatum);
int64 shardId = DatumGetInt64(shardIdDatum);
return shardId;
}
/* *INDENT-OFF* */
@ -2283,8 +1953,10 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
}
/* error if any shard missing min/max values */
if (IsCitusTableTypeCacheEntry(cacheEntry, DISTRIBUTED_TABLE) &&
cacheEntry->hasUninitializedShardInterval)
if (cacheEntry->hasUninitializedShardInterval)
{
if (IsCitusTableTypeCacheEntry(cacheEntry, HASH_DISTRIBUTED) ||
IsCitusTableTypeCacheEntry(cacheEntry, RANGE_DISTRIBUTED))
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not start copy"),
@ -2292,6 +1964,7 @@ CitusCopyDestReceiverStartup(DestReceiver *dest, int operation,
"with missing shardminvalue/shardmaxvalue.", "with missing shardminvalue/shardmaxvalue.",
relationName))); relationName)));
}
}
/* prevent concurrent placement changes and non-commutative DML statements */
LockShardListMetadata(shardIntervalList, ShareLock);
@ -2670,6 +2343,58 @@ RemovePlacementStateFromCopyConnectionStateBuffer(CopyConnectionState *connectio
}
/*
* ProcessAppendToShardOption returns the value of append_to_shard if set,
* and removes the option from the options list.
*/
static uint64
ProcessAppendToShardOption(Oid relationId, CopyStmt *copyStatement)
{
uint64 appendShardId = INVALID_SHARD_ID;
bool appendToShardSet = false;
DefElem *defel = NULL;
foreach_ptr(defel, copyStatement->options)
{
if (strncmp(defel->defname, APPEND_TO_SHARD_OPTION, NAMEDATALEN) == 0)
{
appendShardId = defGetInt64(defel);
appendToShardSet = true;
break;
}
}
if (appendToShardSet)
{
if (!IsCitusTableType(relationId, APPEND_DISTRIBUTED))
{
ereport(ERROR, (errmsg(APPEND_TO_SHARD_OPTION " is only valid for "
"append-distributed tables")));
}
/* throws an error if shard does not exist */
ShardInterval *shardInterval = LoadShardInterval(appendShardId);
/* also check whether shard belongs to table */
if (shardInterval->relationId != relationId)
{
ereport(ERROR, (errmsg("shard " UINT64_FORMAT " does not belong to table %s",
appendShardId, get_rel_name(relationId))));
}
copyStatement->options =
RemoveOptionFromList(copyStatement->options, APPEND_TO_SHARD_OPTION);
}
else if (IsCitusTableType(relationId, APPEND_DISTRIBUTED))
{
ereport(ERROR, (errmsg("COPY into append-distributed table requires using the "
APPEND_TO_SHARD_OPTION " option")));
}
return appendShardId;
}
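For reference, the new SQL-level workflow, a minimal sketch based on the regression tests in this PR (the table name events is hypothetical): create an empty shard explicitly, then pass its ID to COPY via the new option.

-- create an empty shard and capture its ID (psql \gset)
SELECT master_create_empty_shard('events') AS shardid \gset
-- append the copied rows to that specific shard
COPY events FROM PROGRAM 'echo 1 && echo 2' WITH (format 'csv', append_to_shard :shardid);
-- omitting the option now fails, per the check above:
-- ERROR: COPY into append-distributed table requires using the append_to_shard option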
/*
* ContainsLocalPlacement returns true if the current node has
* a local placement for the given shard id.
@ -2703,6 +2428,13 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
int partitionColumnIndex = copyDest->partitionColumnIndex;
Datum partitionColumnValue = 0;
CopyCoercionData *columnCoercionPaths = copyDest->columnCoercionPaths;
CitusTableCacheEntry *cacheEntry =
GetCitusTableCacheEntry(copyDest->distributedRelationId);
if (IsCitusTableTypeCacheEntry(cacheEntry, APPEND_DISTRIBUTED))
{
return copyDest->appendShardId;
}
/*
* Find the partition column value and corresponding shard interval
@ -2743,8 +2475,6 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu
* For reference tables, this function blindly returns the table's single
* shard.
*/
CitusTableCacheEntry *cacheEntry =
GetCitusTableCacheEntry(copyDest->distributedRelationId);
ShardInterval *shardInterval = FindShardInterval(partitionColumnValue, cacheEntry);
if (shardInterval == NULL)
{
@ -3230,6 +2960,7 @@ CitusCopyTo(CopyStmt *copyStatement, QueryCompletionCompat *completionTag)
if (shardIntervalCell == list_head(shardIntervalList))
{
/* remove header after the first shard */
copyStatement->options =
RemoveOptionFromList(copyStatement->options, "header");
}
}


@ -140,6 +140,12 @@ typedef struct CitusCopyDestReceiver
* files as if they are shards.
*/
char *colocatedIntermediateResultIdPrefix;
/*
* When copying into append-partitioned tables, the destination shard is chosen
* upfront.
*/
uint64 appendShardId;
} CitusCopyDestReceiver;
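Because the destination shard is fixed upfront, every row of a single COPY lands in the shard named by append_to_shard, regardless of the row values. A quick way to verify per-shard row counts, a sketch using the run_command_on_shards() UDF that ships with Citus (table name again hypothetical):

-- run count(*) on each shard; %s is replaced by the shard name
SELECT shardid, result AS row_count
FROM run_command_on_shards('events', 'SELECT count(*) FROM %s');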


@ -25,3 +25,6 @@
# python
*.pyc
# core dumps
core


@ -43,7 +43,7 @@ output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $
# intermediate, for muscle memory backward compatibility.
check: check-full
# check-full triggers all tests that ought to be run routinely
check-full: check-multi check-multi-mx check-worker check-operations check-follower-cluster check-failure
check-full: check-multi check-multi-mx check-multi-1 check-worker check-operations check-follower-cluster check-isolation check-failure
ISOLATION_DEPDIR=.deps/isolation


@ -10,10 +10,8 @@
/multi_behavioral_analytics_create_table_superuser.out
/multi_complex_count_distinct.out
/multi_copy.out
/multi_create_schema.out
/multi_load_data.out
/multi_load_data_superuser.out
/multi_load_large_records.out
/multi_load_more_data.out
/multi_mx_copy_data.out
/multi_outer_join.out


@ -401,7 +401,7 @@ SELECT citus_add_local_table_to_metadata('"LocalTabLE.1!?!9012345678901234567890
-- create some objects after citus_add_local_table_to_metadata
CREATE INDEX "my!Index2" ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id) WITH ( fillfactor = 90 ) WHERE id < 20;
NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456"
NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' )WHERE (id < 20)
NOTICE: executing the command locally: CREATE INDEX "my!Index2_1504022" ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id ) WITH (fillfactor = '90' ) WHERE (id < 20)
CREATE UNIQUE INDEX uniqueIndex2 ON "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789"(id);
NOTICE: identifier "LocalTabLE.1!?!9012345678901234567890123456789012345678901234567890123456789" will be truncated to "LocalTabLE.1!?!901234567890123456789012345678901234567890123456"
NOTICE: executing the command locally: CREATE UNIQUE INDEX uniqueindex2_1504022 ON "CiTUS!LocalTables"."LocalTabLE.1!?!9012345678901234567890123456789_7e923997_1504022" USING btree (id )


@ -120,9 +120,11 @@ SELECT create_distributed_table('test_table_statistics_append', 'id', 'append');
(1 row)
COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH CSV;
COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH CSV;
-- originally shardminvalue and shardmaxvalue will be 0,3 and 4, 7
SELECT master_create_empty_shard('test_table_statistics_append') AS shardid1 \gset
SELECT master_create_empty_shard('test_table_statistics_append') AS shardid2 \gset
COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH (format 'csv', append_to_shard :shardid1);
COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH (format 'csv', append_to_shard :shardid2);
-- shardminvalue and shardmaxvalue are NULL
SELECT
ds.logicalrelid::regclass::text AS tablename,
ds.shardid AS shardid,
@ -135,10 +137,10 @@ WHERE ds.logicalrelid::regclass::text in ('test_table_statistics_append')
ORDER BY 2, 3;
tablename | shardid | placementid | shardname | shardminvalue | shardmaxvalue
---------------------------------------------------------------------
test_table_statistics_append | 981008 | 982016 | test_table_statistics_append_981008 | 0 | 3
test_table_statistics_append | 981008 | 982017 | test_table_statistics_append_981008 | 0 | 3
test_table_statistics_append | 981009 | 982018 | test_table_statistics_append_981009 | 4 | 7
test_table_statistics_append | 981009 | 982019 | test_table_statistics_append_981009 | 4 | 7
test_table_statistics_append | 981008 | 982016 | test_table_statistics_append_981008 | |
test_table_statistics_append | 981008 | 982017 | test_table_statistics_append_981008 | |
test_table_statistics_append | 981009 | 982018 | test_table_statistics_append_981009 | |
test_table_statistics_append | 981009 | 982019 | test_table_statistics_append_981009 | |
(4 rows)
-- delete some data to change shardminvalues of the shards


@ -124,7 +124,8 @@ FROM
WHERE
logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
'sensors_2001'::regclass, 'sensors_2002'::regclass,
'sensors_2003'::regclass, 'sensors_2004'::regclass);
'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;
logicalrelid | column_to_column_name
---------------------------------------------------------------------
sensors | measureid
@ -146,7 +147,7 @@ WITH
all_shardids AS (SELECT * FROM sensors_shardid UNION SELECT * FROM sensors_2000_shardid UNION
SELECT * FROM sensors_2001_shardid UNION SELECT * FROM sensors_2002_shardid
UNION SELECT * FROM sensors_2003_shardid UNION SELECT * FROM sensors_2004_shardid)
SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids);
SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids) ORDER BY 1,2,3,4;
logicalrelid | shardid | shardminvalue | shardmaxvalue
---------------------------------------------------------------------
sensors | 2580001 | -1073741824 | -1
@ -357,7 +358,8 @@ FROM
WHERE
logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
'sensors_2001'::regclass, 'sensors_2002'::regclass,
'sensors_2003'::regclass, 'sensors_2004'::regclass);
'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;
logicalrelid | column_to_column_name
---------------------------------------------------------------------
sensors | measureid
@ -377,7 +379,8 @@ FROM
WHERE
logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
'sensors_2001'::regclass, 'sensors_2002'::regclass,
'sensors_2003'::regclass, 'sensors_2004'::regclass);
'sensors_2003'::regclass, 'sensors_2004'::regclass)
ORDER BY 1,2;
logicalrelid | column_to_column_name
---------------------------------------------------------------------
sensors | measureid


@ -1,827 +0,0 @@
Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
15
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-router-select: SELECT * FROM append_copy WHERE id = 1;
id|data|int_data
---------------------------------------------------------------------
1| b | 1
(1 row)
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2;
id|data|int_data
---------------------------------------------------------------------
0| a | 0
1| b | 1
2| c | 2
3| d | 3
4| e | 4
(5 rows)
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-adaptive-select:
SET citus.enable_repartition_joins TO ON;
SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
id|data|int_data|id|data|int_data
---------------------------------------------------------------------
0| a | 0| 0| a | 0
1| b | 1| 1| b | 1
2| c | 2| 2| c | 2
3| d | 3| 3| d | 3
4| e | 4| 4| e | 4
(5 rows)
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-insert: INSERT INTO append_copy VALUES(0, 'k', 0);
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
11
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-insert-select: INSERT INTO append_copy SELECT * FROM append_copy;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
15
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-update: UPDATE append_copy SET data = 'l' WHERE id = 0;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-delete: DELETE FROM append_copy WHERE id = 1;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
9
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-truncate: TRUNCATE append_copy; <waiting ...>
step s1-commit: COMMIT;
step s2-truncate: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
0
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-drop: DROP TABLE append_copy; <waiting ...>
step s1-commit: COMMIT;
step s2-drop: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
ERROR: relation "append_copy" does not exist
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-create-index: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id);
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-ddl-drop-index: DROP INDEX append_copy_index; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-drop-index: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-create-index-concurrently: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,1)
(localhost,57638,t,1)
(2 rows)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-add-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(2 rows)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s1-copy-additional-column: COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV;
step s2-ddl-drop-column: ALTER TABLE append_copy DROP new_column; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-drop-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")
(2 rows)
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; <waiting ...>
step s1-commit: COMMIT;
step s2-ddl-rename-column: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(2 rows)
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-table-size: SELECT citus_total_relation_size('append_copy');
citus_total_relation_size
---------------------------------------------------------------------
32768
(1 row)
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); <waiting ...>
step s1-commit: COMMIT;
step s2-master-drop-all-shards: <... completed>
citus_drop_all_shards
---------------------------------------------------------------------
2
(1 row)
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
0
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-drop: DROP TABLE append_copy;
step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s2-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append'); <waiting ...>
step s1-commit: COMMIT;
step s2-distribute-table: <... completed>
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
0
(1 row)
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-router-select: SELECT * FROM append_copy WHERE id = 1;
id|data|int_data
---------------------------------------------------------------------
1| b | 1
(1 row)
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2;
id|data|int_data
---------------------------------------------------------------------
0| a | 0
1| b | 1
2| c | 2
3| d | 3
4| e | 4
(5 rows)
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-adaptive-select:
SET citus.enable_repartition_joins TO ON;
SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
id|data|int_data|id|data|int_data
---------------------------------------------------------------------
0| a | 0| 0| a | 0
1| b | 1| 1| b | 1
2| c | 2| 2| c | 2
3| d | 3| 3| d | 3
4| e | 4| 4| e | 4
(5 rows)
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO append_copy VALUES(0, 'k', 0);
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
11
(1 row)
starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO append_copy SELECT * FROM append_copy;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
15
(1 row)
starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE append_copy SET data = 'l' WHERE id = 0;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM append_copy WHERE id = 1;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
9
(1 row)
starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE append_copy;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)
starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE append_copy;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
ERROR: relation "append_copy" does not exist
step s1-select-count: SELECT COUNT(*) FROM append_copy;
ERROR: relation "append_copy" does not exist
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id);
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,2)
(localhost,57638,t,2)
(2 rows)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id);
step s1-begin: BEGIN;
step s1-ddl-drop-index: DROP INDEX append_copy_index;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%''');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,0)
(localhost,57638,t,0)
(2 rows)
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
ERROR: missing data for column "new_column"
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(2 rows)
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s1-ddl-drop-column: ALTER TABLE append_copy DROP new_column;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,"")
(localhost,57638,t,"")
(2 rows)
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column;
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1');
run_command_on_workers
---------------------------------------------------------------------
(localhost,57637,t,new_column)
(localhost,57638,t,new_column)
(2 rows)
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('append_copy');
citus_total_relation_size
---------------------------------------------------------------------
32768
(1 row)
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
step s1-commit: COMMIT;
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
10
(1 row)
starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy');
citus_drop_all_shards
---------------------------------------------------------------------
1
(1 row)
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s1-drop: DROP TABLE append_copy;
step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
step s1-select-count: SELECT COUNT(*) FROM append_copy;
count
---------------------------------------------------------------------
5
(1 row)
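For reference, the fixture these append_copy permutations assume can be reproduced with the statements below (a sketch assembled from the s1-create-non-distributed-table, s1-distribute-table, and s1-initialize steps above; nothing here is new API):

CREATE TABLE append_copy(id integer, data text, int_data int);
SELECT create_distributed_table('append_copy', 'id', 'append');
-- the initial load used by s1-initialize: five CSV rows generated by echo
COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;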
@ -0,0 +1,104 @@
Parsed test spec with 4 sessions
starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1;
step s2-update:
UPDATE distributed_table SET y = 2 WHERE x = 1;
<waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting
FROM
dump_global_wait_edges()
ORDER BY
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
406| 405|f
(1 row)
transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
405|
406| 405
(2 rows)
step s1-abort:
ABORT;
step s2-update: <... completed>
step s2-abort:
ABORT;
starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort
step s1-begin:
BEGIN;
step s2-begin:
BEGIN;
step s3-begin:
BEGIN;
step s1-update:
UPDATE distributed_table SET y = 1 WHERE x = 1;
step s2-update:
UPDATE distributed_table SET y = 2 WHERE x = 1;
<waiting ...>
step s3-update:
UPDATE distributed_table SET y = 3 WHERE x = 1;
<waiting ...>
step detector-dump-wait-edges:
SELECT
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting
FROM
dump_global_wait_edges()
ORDER BY
waiting_transaction_num,
blocking_transaction_num,
blocking_transaction_waiting;
SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1;
waiting_transaction_num|blocking_transaction_num|blocking_transaction_waiting
---------------------------------------------------------------------
410| 409|f
411| 409|f
411| 410|t
(3 rows)
transactionnumber|waitingtransactionnumbers
---------------------------------------------------------------------
409|
410|409
411|409,410
(3 rows)
step s1-abort:
ABORT;
step s2-update: <... completed>
step s2-abort:
ABORT;
step s3-update: <... completed>
step s3-abort:
ABORT;
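The detector-dump-wait-edges step boils down to a single query; a minimal standalone sketch (run from the coordinator while another session is blocked; dump_global_wait_edges() is the function the spec calls above, and get_adjacency_list_wait_graph() is a test helper):

SELECT waiting_transaction_num,
       blocking_transaction_num,
       blocking_transaction_waiting
FROM dump_global_wait_edges()
ORDER BY waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting;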
@ -1,11 +1,6 @@
Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -19,11 +14,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -42,11 +32,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -69,11 +54,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-adaptive-select s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -99,11 +79,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -117,11 +92,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -135,11 +105,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -153,11 +118,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -171,11 +131,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -190,11 +145,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -205,11 +155,6 @@ step s1-select-count: SELECT COUNT(*) FROM range_copy;
ERROR: relation "range_copy" does not exist
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -225,17 +170,12 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(localhost,57637,t,1)
+(localhost,57638,t,1)
(2 rows)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
step s1-begin: BEGIN;
@ -258,11 +198,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -284,11 +219,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -310,11 +240,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@ -337,11 +262,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -363,11 +283,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -386,11 +301,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -404,11 +314,6 @@ count
starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -428,16 +333,17 @@ count
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-drop: DROP TABLE range_copy;
step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
-step s2-distribute-table: SELECT create_distributed_table('range_copy', 'id', 'range'); <waiting ...>
+step s2-distribute-table:
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 3004005;
+SELECT create_distributed_table('range_copy', 'id', 'range');
+UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
+UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
+<waiting ...>
step s1-commit: COMMIT;
step s2-distribute-table: <... completed>
create_distributed_table
@ -453,11 +359,6 @@ count
starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-router-select: SELECT * FROM range_copy WHERE id = 1;
@ -476,11 +377,6 @@ count
starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2;
@ -503,11 +399,6 @@ count
starting permutation: s1-initialize s1-begin s1-adaptive-select s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-adaptive-select:
@ -533,11 +424,6 @@ count
starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert: INSERT INTO range_copy VALUES(0, 'k', 0);
@ -551,11 +437,6 @@ count
starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-insert-select: INSERT INTO range_copy SELECT * FROM range_copy;
@ -569,11 +450,6 @@ count
starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-update: UPDATE range_copy SET data = 'l' WHERE id = 0;
@ -587,11 +463,6 @@ count
starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-delete: DELETE FROM range_copy WHERE id = 1;
@ -605,11 +476,6 @@ count
starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE range_copy;
@ -624,11 +490,6 @@ count
starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-drop: DROP TABLE range_copy;
@ -640,11 +501,6 @@ step s1-select-count: SELECT COUNT(*) FROM range_copy;
ERROR: relation "range_copy" does not exist
starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
@ -660,17 +516,12 @@ count
step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%''');
run_command_on_workers
---------------------------------------------------------------------
-(localhost,57637,t,2)
-(localhost,57638,t,2)
+(localhost,57637,t,1)
+(localhost,57638,t,1)
(2 rows)
starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id);
step s1-begin: BEGIN;
@ -693,11 +544,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
@ -720,11 +566,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
@ -747,11 +588,6 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column;
@ -773,17 +609,12 @@ run_command_on_workers
starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('range_copy');
citus_total_relation_size
---------------------------------------------------------------------
-32768
+24576
(1 row)
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV;
@ -796,11 +627,6 @@ count
starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM range_copy;
@ -814,35 +640,26 @@ count
starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV;
step s1-begin: BEGIN;
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy');
citus_drop_all_shards
---------------------------------------------------------------------
-1
+2
(1 row)
step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; <waiting ...>
step s1-commit: COMMIT;
step s2-copy: <... completed>
+ERROR: could not find any shards into which to copy
step s1-select-count: SELECT COUNT(*) FROM range_copy;
count
---------------------------------------------------------------------
-5
+0
(1 row)
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count
-create_distributed_table
----------------------------------------------------------------------
-(1 row)
step s1-drop: DROP TABLE range_copy;
step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int);
step s1-begin: BEGIN;
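The reworked s2-distribute-table step no longer relies on COPY to create shards: it pins the shard ids up front and assigns each shard's range by hand. A sketch of that setup, copied from the step above (shard ids 3004005/3004006 and the 0-4/5-9 bounds are test fixtures, not defaults; the spec assumes the two shards created here initially carry no range bounds):

SET citus.shard_replication_factor TO 1;
SET citus.next_shard_id TO 3004005;
SELECT create_distributed_table('range_copy', 'id', 'range');
-- assign each shard's range manually so rows 0-4 and 5-9 have a home
UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;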
@ -1,12 +1,12 @@
Parsed test spec with 2 sessions
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -27,12 +27,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -53,12 +53,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -75,12 +75,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -108,12 +108,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@ -142,12 +142,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY truncate_append_index ON truncate_append(id); <waiting ...>
@ -173,12 +173,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -206,12 +206,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN;
step s2-begin: BEGIN;
@ -240,12 +240,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -273,12 +273,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -304,12 +304,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -330,12 +330,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -361,9 +361,9 @@ restore_isolation_tester_func
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-truncate s2-distribute-table s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
step s1-drop: DROP TABLE truncate_append;
@ -393,12 +393,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-truncate: TRUNCATE truncate_append;
@ -419,12 +419,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-drop: DROP TABLE truncate_append;
@ -442,12 +442,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN;
step s2-begin: BEGIN;
step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id);
@ -475,12 +475,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes
-create_distributed_table
+master_create_empty_shard
---------------------------------------------------------------------
+5990340
(1 row)
-step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV;
+step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id);
step s1-begin: BEGIN; step s1-begin: BEGIN;
step s2-begin: BEGIN; step s2-begin: BEGIN;
@ -509,12 +509,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table master_create_empty_shard
--------------------------------------------------------------------- ---------------------------------------------------------------------
5990340
(1 row) (1 row)
step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN; step s1-begin: BEGIN;
step s2-begin: BEGIN; step s2-begin: BEGIN;
step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0;
@ -542,12 +542,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table master_create_empty_shard
--------------------------------------------------------------------- ---------------------------------------------------------------------
5990340
(1 row) (1 row)
step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0;
step s1-begin: BEGIN; step s1-begin: BEGIN;
step s2-begin: BEGIN; step s2-begin: BEGIN;
@ -576,12 +576,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns
create_distributed_table master_create_empty_shard
--------------------------------------------------------------------- ---------------------------------------------------------------------
5990340
(1 row) (1 row)
step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN; step s1-begin: BEGIN;
step s2-begin: BEGIN; step s2-begin: BEGIN;
step s1-ddl-rename-column: ALTER TABLE truncate_append RENAME data TO new_column; step s1-ddl-rename-column: ALTER TABLE truncate_append RENAME data TO new_column;
@ -609,18 +609,18 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table master_create_empty_shard
--------------------------------------------------------------------- ---------------------------------------------------------------------
5990340
(1 row) (1 row)
step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN; step s1-begin: BEGIN;
step s2-begin: BEGIN; step s2-begin: BEGIN;
step s1-table-size: SELECT citus_total_relation_size('truncate_append'); step s1-table-size: SELECT citus_total_relation_size('truncate_append');
citus_total_relation_size citus_total_relation_size
--------------------------------------------------------------------- ---------------------------------------------------------------------
32768 16384
(1 row) (1 row)
step s2-truncate: TRUNCATE truncate_append; step s2-truncate: TRUNCATE truncate_append;
@ -639,12 +639,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table master_create_empty_shard
--------------------------------------------------------------------- ---------------------------------------------------------------------
5990340
(1 row) (1 row)
step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN; step s1-begin: BEGIN;
step s2-begin: BEGIN; step s2-begin: BEGIN;
step s1-master-modify-multiple-shards: DELETE FROM truncate_append; step s1-master-modify-multiple-shards: DELETE FROM truncate_append;
@ -665,12 +665,12 @@ restore_isolation_tester_func
starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table master_create_empty_shard
--------------------------------------------------------------------- ---------------------------------------------------------------------
5990340
(1 row) (1 row)
step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard xxxxx);
step s1-begin: BEGIN; step s1-begin: BEGIN;
step s2-begin: BEGIN; step s2-begin: BEGIN;
step s1-master-drop-all-shards: SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); step s1-master-drop-all-shards: SELECT citus_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append');
@ -696,9 +696,9 @@ restore_isolation_tester_func
starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count
create_distributed_table master_create_empty_shard
--------------------------------------------------------------------- ---------------------------------------------------------------------
5990340
(1 row) (1 row)
step s1-drop: DROP TABLE truncate_append; step s1-drop: DROP TABLE truncate_append;
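For reference, every permutation above now relies on the same setup: because COPY no longer creates shards on append-distributed tables, the spec creates an empty shard first and names it in the COPY options (the xxxxx in the output is the isolation tester masking the real shard id). A minimal psql sketch of the pattern, with illustrative names:

CREATE TABLE truncate_append (id int, data text);
SELECT create_distributed_table('truncate_append', 'id', 'append');
SELECT master_create_empty_shard('truncate_append') AS shardid \gset
COPY truncate_append FROM PROGRAM 'echo 0, a' WITH (format 'csv', append_to_shard :shardid);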
View File
@ -76,6 +76,7 @@ SELECT create_distributed_table('products_append', 'product_no', 'append');
(1 row) (1 row)
SELECT master_create_empty_shard('products_append') AS shardid \gset
-- Can only add primary key constraint on distribution column (or group -- Can only add primary key constraint on distribution column (or group
-- of columns including distribution column) -- of columns including distribution column)
-- Command below should error out since 'name' is not a distribution column -- Command below should error out since 'name' is not a distribution column
@ -90,7 +91,7 @@ WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint
DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
HINT: Consider using hash partitioning. HINT: Consider using hash partitioning.
--- Error out since first and third rows have the same product_no --- Error out since first and third rows have the same product_no
\COPY products_append FROM STDIN DELIMITER AS ','; COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
ERROR: duplicate key value violates unique constraint "p_key_1450033" ERROR: duplicate key value violates unique constraint "p_key_1450033"
DETAIL: Key (product_no)=(1) already exists. DETAIL: Key (product_no)=(1) already exists.
DROP TABLE products_append; DROP TABLE products_append;
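Note the switch from the \COPY meta-command to a plain COPY statement in these tests: psql performs no variable interpolation inside \copy arguments, so a shard id captured with \gset can only be referenced from a regular SQL-level COPY, along the lines of:

SELECT master_create_empty_shard('products_append') AS shardid \gset
COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);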
@ -163,6 +164,7 @@ SELECT create_distributed_table('unique_test_table_append', 'id', 'append');
(1 row) (1 row)
SELECT master_create_empty_shard('unique_test_table_append') AS shardid \gset
-- Can only add unique constraint on distribution column (or group -- Can only add unique constraint on distribution column (or group
-- of columns including distribution column) -- of columns including distribution column)
-- Command below should error out since 'name' is not a distribution column -- Command below should error out since 'name' is not a distribution column
@ -177,7 +179,7 @@ WARNING: table "unique_test_table_append" has a UNIQUE or EXCLUDE constraint
DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
HINT: Consider using hash partitioning. HINT: Consider using hash partitioning.
-- Error out. Table can not have two rows with the same id. -- Error out. Table can not have two rows with the same id.
\COPY unique_test_table_append FROM STDIN DELIMITER AS ','; COPY unique_test_table_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
ERROR: duplicate key value violates unique constraint "unn_id_1450067" ERROR: duplicate key value violates unique constraint "unn_id_1450067"
DETAIL: Key (id)=(X) already exists. DETAIL: Key (id)=(X) already exists.
DROP TABLE unique_test_table_append; DROP TABLE unique_test_table_append;
@ -250,12 +252,13 @@ SELECT create_distributed_table('products_append', 'product_no', 'append');
(1 row) (1 row)
SELECT master_create_empty_shard('products_append') AS shardid \gset
-- Can add column and table check constraints -- Can add column and table check constraints
ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0);
ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price);
-- Error out, since the third row conflicts with the p_multi_check -- Error out, since the third row conflicts with the p_multi_check
\COPY products_append FROM STDIN DELIMITER AS ','; COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
ERROR: new row for relation "products_append_1450101" violates check constraint "p_multi_check" ERROR: new row for relation "products_append_1450101" violates check constraint "p_multi_check_1450101"
DETAIL: Failing row contains (1, Product_3, 8, 10). DETAIL: Failing row contains (1, Product_3, 8, 10).
DROP TABLE products_append; DROP TABLE products_append;
-- Check "EXCLUSION CONSTRAINT" -- Check "EXCLUSION CONSTRAINT"
@ -323,6 +326,7 @@ SELECT create_distributed_table('products_append', 'product_no','append');
(1 row) (1 row)
SELECT master_create_empty_shard('products_append') AS shardid \gset
-- Can only add exclusion constraint on distribution column (or group of columns -- Can only add exclusion constraint on distribution column (or group of columns
-- including distribution column) -- including distribution column)
-- Command below should error out since 'name' is not a distribution column -- Command below should error out since 'name' is not a distribution column
@ -337,7 +341,7 @@ WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint
DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced.
HINT: Consider using hash partitioning. HINT: Consider using hash partitioning.
-- Error out since first and third can not pass the exclusion check. -- Error out since first and third can not pass the exclusion check.
\COPY products_append FROM STDIN DELIMITER AS ','; COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450135" ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450135"
DETAIL: Key (product_no, name)=(1, Product_1) conflicts with existing key (product_no, name)=(1, Product_1). DETAIL: Key (product_no, name)=(1, Product_1) conflicts with existing key (product_no, name)=(1, Product_1).
DROP TABLE products_append; DROP TABLE products_append;
@ -394,9 +398,10 @@ SELECT create_distributed_table('products_append', 'product_no', 'append');
(1 row) (1 row)
SELECT master_create_empty_shard('products_append') AS shardid \gset
ALTER TABLE products_append ALTER COLUMN name SET NOT NULL; ALTER TABLE products_append ALTER COLUMN name SET NOT NULL;
-- Error out since name and product_no columns can not handle NULL value. -- Error out since name and product_no columns can not handle NULL value.
\COPY products_append FROM STDIN DELIMITER AS ','; COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
DROP TABLE products_append; DROP TABLE products_append;
-- Tests for ADD CONSTRAINT is not only subcommand -- Tests for ADD CONSTRAINT is not only subcommand
CREATE TABLE products ( CREATE TABLE products (
View File
@ -112,6 +112,12 @@ SELECT create_distributed_table('customer_append', 'c_custkey', 'append');
(1 row) (1 row)
SELECT master_create_empty_shard('customer_append');
master_create_empty_shard
---------------------------------------------------------------------
360006
(1 row)
CREATE TABLE nation ( CREATE TABLE nation (
n_nationkey integer not null, n_nationkey integer not null,
n_name char(25) not null, n_name char(25) not null,
@ -155,6 +161,12 @@ SELECT create_distributed_table('part_append', 'p_partkey', 'append');
(1 row) (1 row)
SELECT master_create_empty_shard('part_append');
master_create_empty_shard
---------------------------------------------------------------------
360009
(1 row)
CREATE TABLE supplier CREATE TABLE supplier
( (
s_suppkey integer not null, s_suppkey integer not null,
View File
@ -3,37 +3,48 @@
-- --
-- This test checks that we can handle null min/max values in shard statistics -- This test checks that we can handle null min/max values in shard statistics
-- and that we don't partition or join prune shards that have null values. -- and that we don't partition or join prune shards that have null values.
SET client_min_messages TO DEBUG2; CREATE SCHEMA multi_null_minmax_value_pruning;
SET search_path TO multi_null_minmax_value_pruning;
SET citus.explain_all_tasks TO on; SET citus.explain_all_tasks TO on;
-- to avoid differing explain output - executor doesn't matter,
-- because we're testing pruning here.
-- Change configuration to treat lineitem and orders tables as large
SET citus.log_multi_join_order to true; SET citus.log_multi_join_order to true;
SET citus.enable_repartition_joins to ON; SET citus.enable_repartition_joins to ON;
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; SET citus.next_shard_id = 290000;
shardminvalue | shardmaxvalue CREATE TABLE lineitem (LIKE public.lineitem);
SELECT create_distributed_table('lineitem', 'l_orderkey', 'range');
create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 1000
(1 row) (1 row)
SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; SELECT master_create_empty_shard('lineitem') as lineitem_shardid1 \gset
shardminvalue | shardmaxvalue SELECT master_create_empty_shard('lineitem') as lineitem_shardid2 \gset
CREATE TABLE orders (LIKE public.orders);
SELECT create_distributed_table('orders', 'o_orderkey', 'range');
create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 1000
(1 row) (1 row)
SELECT master_create_empty_shard('orders') as orders_shardid1 \gset
SELECT master_create_empty_shard('orders') as orders_shardid2 \gset
SET client_min_messages TO DEBUG2;
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000' WHERE shardid = :lineitem_shardid1 OR shardid = :orders_shardid1;
UPDATE pg_dist_shard SET shardminvalue = '6001', shardmaxvalue = '20000' WHERE shardid = :lineitem_shardid2 OR shardid = :orders_shardid2;
UPDATE pg_dist_partition SET colocationid = 87091 WHERE logicalrelid = 'orders'::regclass OR logicalrelid = 'lineitem'::regclass;
-- Check that partition and join pruning works when min/max values exist -- Check that partition and join pruning works when min/max values exist
-- Adding l_orderkey = 1 to make the query not router executable -- Adding l_orderkey = 1 to make the query not router executable
SELECT coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE) EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
$Q$); $Q$);
DEBUG: Creating router plan DEBUG: Router planner cannot handle multi-shard select queries
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
LOG: join order: [ "lineitem" ]
CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
coordinator_plan coordinator_plan
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (Citus Adaptive) Custom Scan (Citus Adaptive)
Task Count: 1 Task Count: 2
(2 rows) (2 rows)
EXPLAIN (COSTS FALSE) EXPLAIN (COSTS FALSE)
@ -41,8 +52,8 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
WHERE l_orderkey = o_orderkey; WHERE l_orderkey = o_orderkey;
DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries
LOG: join order: [ "lineitem" ][ local partition join "orders" ] LOG: join order: [ "lineitem" ][ local partition join "orders" ]
DEBUG: join prunable for intervals [-2147483648,-1] and [0,2147483647] DEBUG: join prunable for intervals [1,6000] and [6001,20000]
DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1] DEBUG: join prunable for intervals [6001,20000] and [1,6000]
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
@ -53,38 +64,36 @@ DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1]
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Aggregate -> Aggregate
-> Hash Join -> Hash Join
Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) Hash Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Seq Scan on lineitem_360000 lineitem -> Seq Scan on orders_290002 orders
-> Hash -> Hash
-> Seq Scan on orders_360002 orders -> Seq Scan on lineitem_290000 lineitem
-> Task -> Task
Node: host=localhost port=xxxxx dbname=regression Node: host=localhost port=xxxxx dbname=regression
-> Aggregate -> Aggregate
-> Hash Join -> Hash Join
Hash Cond: (lineitem.l_orderkey = orders.o_orderkey) Hash Cond: (orders.o_orderkey = lineitem.l_orderkey)
-> Seq Scan on lineitem_360001 lineitem -> Seq Scan on orders_290003 orders
-> Hash -> Hash
-> Seq Scan on orders_360003 orders -> Seq Scan on lineitem_290001 lineitem
(20 rows) (20 rows)
-- Now set the minimum value for a shard to null. Then check that we don't apply -- Now set the minimum value for a shard to null. Then check that we don't apply
-- partition or join pruning for the shard with null min value. Since it is not -- partition or join pruning for the shard with null min value. Since it is not
-- supported with a single-repartition join, a dual-repartition join is used. -- supported with a single-repartition join, a dual-repartition join is used.
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = :lineitem_shardid1;
SELECT coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE) EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
$Q$); $Q$);
DEBUG: Distributed planning for a fast-path router query DEBUG: Router planner cannot handle multi-shard select queries
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
DEBUG: Creating router plan LOG: join order: [ "lineitem" ]
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
DEBUG: query has a single distribution column value: 9030
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
coordinator_plan coordinator_plan
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (Citus Adaptive) Custom Scan (Citus Adaptive)
Task Count: 1 Task Count: 2
(2 rows) (2 rows)
EXPLAIN (COSTS FALSE) EXPLAIN (COSTS FALSE)
@ -137,21 +146,19 @@ DETAIL: Creating dependency on merge taskId 12
-- Next, set the maximum value for another shard to null. Then check that we -- Next, set the maximum value for another shard to null. Then check that we
-- don't apply partition or join pruning for this other shard either. Since it -- don't apply partition or join pruning for this other shard either. Since it
-- is not supported with a single-repartition join, a dual-repartition join is used. -- is not supported with a single-repartition join, a dual-repartition join is used.
UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = :lineitem_shardid2;
SELECT coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE) EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
$Q$); $Q$);
DEBUG: Distributed planning for a fast-path router query DEBUG: Router planner cannot handle multi-shard select queries
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
DEBUG: Creating router plan LOG: join order: [ "lineitem" ]
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
DEBUG: query has a single distribution column value: 9030
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
coordinator_plan coordinator_plan
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (Citus Adaptive) Custom Scan (Citus Adaptive)
Task Count: 1 Task Count: 2
(2 rows) (2 rows)
EXPLAIN (COSTS FALSE) EXPLAIN (COSTS FALSE)
@ -204,17 +211,13 @@ DETAIL: Creating dependency on merge taskId 12
-- Last, set the minimum value to 0 and check that we don't treat it as null. We -- Last, set the minimum value to 0 and check that we don't treat it as null. We
-- should apply partition and join pruning for this shard now. Since it is not -- should apply partition and join pruning for this shard now. Since it is not
-- supported with a single-repartition join, a dual-repartition join is used. -- supported with a single-repartition join, a dual-repartition join is used.
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = :lineitem_shardid1;
SELECT coordinator_plan($Q$ SELECT public.coordinator_plan($Q$
EXPLAIN (COSTS FALSE) EXPLAIN (COSTS FALSE)
SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
$Q$); $Q$);
DEBUG: Distributed planning for a fast-path router query
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
DEBUG: Creating router plan DEBUG: Creating router plan
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement CONTEXT: PL/pgSQL function public.coordinator_plan(text) line XX at FOR over EXECUTE statement
DEBUG: query has a single distribution column value: 9030
CONTEXT: PL/pgSQL function coordinator_plan(text) line XX at FOR over EXECUTE statement
coordinator_plan coordinator_plan
--------------------------------------------------------------------- ---------------------------------------------------------------------
Custom Scan (Citus Adaptive) Custom Scan (Citus Adaptive)
@ -268,7 +271,8 @@ DETAIL: Creating dependency on merge taskId 12
Merge Task Count: 4 Merge Task Count: 4
(10 rows) (10 rows)
-- Set minimum and maximum values for two shards back to their original values RESET client_min_messages;
UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; DROP SCHEMA multi_null_minmax_value_pruning CASCADE;
UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001; NOTICE: drop cascades to 2 other objects
SET client_min_messages TO NOTICE; DETAIL: drop cascades to table lineitem
drop cascades to table orders
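Condensing the rewrite above: rather than assuming shards that append COPY used to create, the test now provisions range-distributed tables and assigns each shard's bounds explicitly, so the pruning behavior under test is fully self-contained. The setup distilled from the new expected output:

CREATE SCHEMA multi_null_minmax_value_pruning;
SET search_path TO multi_null_minmax_value_pruning;
CREATE TABLE lineitem (LIKE public.lineitem);
SELECT create_distributed_table('lineitem', 'l_orderkey', 'range');
SELECT master_create_empty_shard('lineitem') AS lineitem_shardid1 \gset
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000'
WHERE shardid = :lineitem_shardid1;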
View File
@ -199,20 +199,48 @@ FROM
orders, customer_append orders, customer_append
WHERE WHERE
o_custkey = c_custkey AND o_custkey = c_custkey AND
c_custkey < 0; c_custkey < 0 AND c_custkey > 0;
DEBUG: Router planner does not support append-partitioned tables. DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------- ---------------------------------------------------------------------
Aggregate Aggregate
-> Custom Scan (Citus Adaptive) -> Custom Scan (Citus Adaptive)
Task Count: 0 Task Count: 4
Tasks Shown: None, not supported for re-partition queries Tasks Shown: None, not supported for re-partition queries
-> MapMergeJob -> MapMergeJob
Map Task Count: 2 Map Task Count: 2
Merge Task Count: 4 Merge Task Count: 4
-> MapMergeJob -> MapMergeJob
Map Task Count: 0 Map Task Count: 3
Merge Task Count: 0 Merge Task Count: 4
(10 rows) (10 rows)
SELECT SELECT
@ -221,8 +249,36 @@ FROM
orders, customer_append orders, customer_append
WHERE WHERE
o_custkey = c_custkey AND o_custkey = c_custkey AND
c_custkey < 0; c_custkey < 0 AND c_custkey > 0;
DEBUG: Router planner does not support append-partitioned tables. DEBUG: Router planner does not support append-partitioned tables.
DEBUG: join prunable for task partitionId 0 and 1
DEBUG: join prunable for task partitionId 0 and 2
DEBUG: join prunable for task partitionId 0 and 3
DEBUG: join prunable for task partitionId 1 and 0
DEBUG: join prunable for task partitionId 1 and 2
DEBUG: join prunable for task partitionId 1 and 3
DEBUG: join prunable for task partitionId 2 and 0
DEBUG: join prunable for task partitionId 2 and 1
DEBUG: join prunable for task partitionId 2 and 3
DEBUG: join prunable for task partitionId 3 and 0
DEBUG: join prunable for task partitionId 3 and 1
DEBUG: join prunable for task partitionId 3 and 2
DEBUG: pruning merge fetch taskId 1
DETAIL: Creating dependency on merge taskId 3
DEBUG: pruning merge fetch taskId 2
DETAIL: Creating dependency on merge taskId 4
DEBUG: pruning merge fetch taskId 4
DETAIL: Creating dependency on merge taskId 6
DEBUG: pruning merge fetch taskId 5
DETAIL: Creating dependency on merge taskId 8
DEBUG: pruning merge fetch taskId 7
DETAIL: Creating dependency on merge taskId 9
DEBUG: pruning merge fetch taskId 8
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 10
DETAIL: Creating dependency on merge taskId 12
DEBUG: pruning merge fetch taskId 11
DETAIL: Creating dependency on merge taskId 16
count count
--------------------------------------------------------------------- ---------------------------------------------------------------------
0 0
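The added DEBUG lines are the point of this change: with both relations append-distributed, the planner falls back to a dual re-partition join even though the predicate c_custkey < 0 AND c_custkey > 0 can never match, and each partitionId is joined only against its matching partition while all other pairings are pruned. A sketch of how such a trace is produced (the DEBUG2 level is an assumption based on similar tests):

SET citus.enable_repartition_joins TO on;
SET citus.log_multi_join_order TO true;
SET client_min_messages TO DEBUG2;  -- assumed level needed to surface the pruning messages
SELECT count(*)
FROM orders, customer_append
WHERE o_custkey = c_custkey AND c_custkey < 0 AND c_custkey > 0;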
View File
@ -115,13 +115,14 @@ CREATE TABLE nation_append_search_path(
n_regionkey integer not null, n_regionkey integer not null,
n_comment varchar(152) n_comment varchar(152)
); );
SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
master_create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
\copy nation_append_search_path FROM STDIN with delimiter '|'; SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset
copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid);
-- create shard with master_create_worker_shards -- create shard with master_create_worker_shards
CREATE TABLE test_schema_support.nation_hash( CREATE TABLE test_schema_support.nation_hash(
n_nationkey integer not null, n_nationkey integer not null,
View File
@ -1,52 +0,0 @@
--
-- NON_COLOCATED_JOIN_ORDER
--
-- Tests to check that shard placements must match for the local join logic to be chosen.
CREATE TABLE test_table_1(id int, value_1 int);
SELECT master_create_distributed_table('test_table_1', 'id', 'append');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
\copy test_table_1 FROM STDIN DELIMITER ','
\copy test_table_1 FROM STDIN DELIMITER ','
CREATE TABLE test_table_2(id int, value_1 int);
SELECT master_create_distributed_table('test_table_2', 'id', 'append');
master_create_distributed_table
---------------------------------------------------------------------
(1 row)
\copy test_table_2 FROM STDIN DELIMITER ','
\copy test_table_2 FROM STDIN DELIMITER ','
SET citus.log_multi_join_order to TRUE;
SET client_min_messages to DEBUG1;
SET citus.enable_repartition_joins TO on;
-- when joining append tables we always get dual re-partition joins
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;
LOG: join order: [ "test_table_1" ][ dual partition join "test_table_2" ]
count
---------------------------------------------------------------------
6
(1 row)
-- Add two shard placements of interval [8,10] to test_table_1
SET citus.shard_replication_factor to 2;
\copy test_table_1 FROM STDIN DELIMITER ','
-- Add two shard placements of interval [8,10] to test_table_2
SET citus.shard_replication_factor to 1;
\copy test_table_2 FROM STDIN DELIMITER ','
-- Although the shard intervals of the relations are the same, they have different
-- numbers of placements for interval [8,10], so the repartition join logic is triggered.
SET citus.enable_repartition_joins to ON;
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;
LOG: join order: [ "test_table_1" ][ dual partition join "test_table_2" ]
count
---------------------------------------------------------------------
9
(1 row)
SET client_min_messages TO default;
DROP TABLE test_table_1;
DROP TABLE test_table_2;
View File
@ -625,6 +625,19 @@ BEGIN;
32 32
INSERT INTO test SELECT i,i FROM generate_series(0,100)i; INSERT INTO test SELECT i,i FROM generate_series(0,100)i;
ROLLBACK; ROLLBACK;
-- master_create_empty_shard on coordinator
BEGIN;
CREATE TABLE append_table (a INT, b INT);
SELECT create_distributed_table('append_table','a','append');
create_distributed_table
---------------------------------------------------------------------
(1 row)
SELECT master_create_empty_shard('append_table');
NOTICE: Creating placements for the append partitioned tables on the coordinator is not supported, skipping coordinator ...
ERROR: could only create 0 of 1 of required shard replicas
END;
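The new block pins down the failure mode on a coordinator-only cluster: placements for append-partitioned shards are never created on the coordinator, so with no worker nodes master_create_empty_shard cannot create even a single replica. Roughly:

BEGIN;
CREATE TABLE append_table (a int, b int);
SELECT create_distributed_table('append_table', 'a', 'append');
SELECT master_create_empty_shard('append_table');
-- errors: could only create 0 of 1 of required shard replicas
ROLLBACK;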
-- alter table inside a tx block -- alter table inside a tx block
BEGIN; BEGIN;
ALTER TABLE test ADD COLUMN z single_node.new_type; ALTER TABLE test ADD COLUMN z single_node.new_type;
View File
@ -8,18 +8,9 @@ SELECT create_distributed_table('append_table', 'key', 'append');
(1 row) (1 row)
SELECT 1 FROM master_create_empty_shard('append_table'); SELECT master_create_empty_shard('append_table') AS shardid1 \gset
?column? SELECT master_create_empty_shard('append_table') AS shardid2 \gset
--------------------------------------------------------------------- SELECT master_create_empty_shard('append_table') AS shardid3 \gset
1
(1 row)
SELECT 1 FROM master_create_empty_shard('append_table');
?column?
---------------------------------------------------------------------
1
(1 row)
CREATE TABLE ref_table (value int); CREATE TABLE ref_table (value int);
CREATE INDEX ON ref_table (value); CREATE INDEX ON ref_table (value);
SELECT create_reference_table('ref_table'); SELECT create_reference_table('ref_table');
@ -28,9 +19,9 @@ SELECT create_reference_table('ref_table');
(1 row) (1 row)
\COPY append_table (key,value) FROM STDIN WITH CSV COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid1);
\COPY append_table (key,value) FROM STDIN WITH CSV COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid2);
\COPY ref_table FROM STDIN WITH CSV COPY ref_table FROM STDIN WITH CSV;
-- exercise some optimizer pushdown features with subqueries -- exercise some optimizer pushdown features with subqueries
SELECT count(*) FROM (SELECT random() FROM append_table) u; SELECT count(*) FROM (SELECT random() FROM append_table) u;
count count
View File
@ -69,7 +69,7 @@ SELECT logicalrelid FROM pg_dist_partition
t_ab t_ab
r r
tr tr
t_append t_range
(6 rows) (6 rows)
SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4::bit(8) SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4::bit(8)
@ -87,7 +87,7 @@ SELECT tgrelid::regclass, tgfoid::regproc, tgisinternal, tgenabled, tgtype::int4
t_ab | citus_truncate_trigger | t | O | 00100000 t_ab | citus_truncate_trigger | t | O | 00100000
r | citus_truncate_trigger | t | O | 00100000 r | citus_truncate_trigger | t | O | 00100000
tr | citus_truncate_trigger | t | O | 00100000 tr | citus_truncate_trigger | t | O | 00100000
t_append | citus_truncate_trigger | t | O | 00100000 t_range | citus_truncate_trigger | t | O | 00100000
(6 rows) (6 rows)
SELECT * FROM t ORDER BY a; SELECT * FROM t ORDER BY a;
@ -305,7 +305,7 @@ SELECT * FROM t3 ORDER BY a;
(3 rows) (3 rows)
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 't_append'::regclass WHERE logicalrelid = 't_range'::regclass
ORDER BY shardminvalue, shardmaxvalue; ORDER BY shardminvalue, shardmaxvalue;
shardminvalue | shardmaxvalue shardminvalue | shardmaxvalue
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -313,7 +313,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
5 | 7 5 | 7
(2 rows) (2 rows)
SELECT * FROM t_append ORDER BY id; SELECT * FROM t_range ORDER BY id;
id | value_1 id | value_1
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 2 1 | 2
@ -324,9 +324,11 @@ SELECT * FROM t_append ORDER BY id;
7 | 4 7 | 4
(6 rows) (6 rows)
\copy t_append FROM STDIN DELIMITER ',' SELECT master_create_empty_shard('t_range') AS new_shard_id \gset
UPDATE pg_dist_shard SET shardminvalue = '9', shardmaxvalue = '11' WHERE shardid = :new_shard_id;
\copy t_range FROM STDIN with (DELIMITER ',')
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
WHERE logicalrelid = 't_append'::regclass WHERE logicalrelid = 't_range'::regclass
ORDER BY shardminvalue, shardmaxvalue; ORDER BY shardminvalue, shardmaxvalue;
shardminvalue | shardmaxvalue shardminvalue | shardmaxvalue
--------------------------------------------------------------------- ---------------------------------------------------------------------
@ -335,7 +337,7 @@ SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
9 | 11 9 | 11
(3 rows) (3 rows)
SELECT * FROM t_append ORDER BY id; SELECT * FROM t_range ORDER BY id;
id | value_1 id | value_1
--------------------------------------------------------------------- ---------------------------------------------------------------------
1 | 2 1 | 2
View File
@ -69,12 +69,16 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex
RETURNS void RETURNS void
AS 'citus', $$master_create_worker_shards$$ AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT; LANGUAGE C STRICT;
CREATE TABLE t_append(id int, value_1 int); CREATE TABLE t_range(id int, value_1 int);
SELECT master_create_distributed_table('t_append', 'id', 'append'); SELECT create_distributed_table('t_range', 'id', 'range');
master_create_distributed_table create_distributed_table
--------------------------------------------------------------------- ---------------------------------------------------------------------
(1 row) (1 row)
\copy t_append FROM STDIN DELIMITER ',' SELECT master_create_empty_shard('t_range') as shardid1 \gset
\copy t_append FROM STDIN DELIMITER ',' SELECT master_create_empty_shard('t_range') as shardid2 \gset
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid1;
UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2;
\copy t_range FROM STDIN with (DELIMITER ',')
\copy t_range FROM STDIN with (DELIMITER ',')
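Unlike the append-distributed tables elsewhere in this change, the converted t_range table needs no append_to_shard option: once each shard's min/max bounds are set in pg_dist_shard, COPY routes every row by its distribution-column value. A condensed sketch of the new setup:

SELECT master_create_empty_shard('t_range') AS shardid1 \gset
UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid1;
\copy t_range FROM STDIN with (DELIMITER ',')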
View File
@ -24,7 +24,7 @@ drop cascades to table upgrade_basic.t_ab
drop cascades to table upgrade_basic.t2 drop cascades to table upgrade_basic.t2
drop cascades to table upgrade_basic.r drop cascades to table upgrade_basic.r
drop cascades to table upgrade_basic.tr drop cascades to table upgrade_basic.tr
drop cascades to table upgrade_basic.t_append drop cascades to table upgrade_basic.t_range
-- as we updated citus to available version, -- as we updated citus to available version,
-- "isn" extension -- "isn" extension
-- "new_schema" schema -- "new_schema" schema
View File
@ -2,7 +2,6 @@
-- MULTI_AGG_TYPE_CONVERSION -- MULTI_AGG_TYPE_CONVERSION
-- --
-- Test aggregate type conversions using sums of integers and division operator -- Test aggregate type conversions using sums of integers and division operator
SELECT sum(l_suppkey) FROM lineitem; SELECT sum(l_suppkey) FROM lineitem;
SELECT sum(l_suppkey) / 2 FROM lineitem; SELECT sum(l_suppkey) / 2 FROM lineitem;
@ -19,8 +18,9 @@ CREATE TABLE aggregate_type (
double_value float(40) not null, double_value float(40) not null,
interval_value interval not null); interval_value interval not null);
SELECT create_distributed_table('aggregate_type', 'float_value', 'append'); SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
SELECT master_create_empty_shard('aggregate_type') AS shardid \gset
\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid);
-- Test conversions using aggregates on floats and division -- Test conversions using aggregates on floats and division
View File
@ -29,7 +29,8 @@ CREATE TABLE lineitem_alter (
) )
WITH ( fillfactor = 80 ); WITH ( fillfactor = 80 );
SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT master_create_empty_shard('lineitem_alter') AS shardid \gset
copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
-- verify that the storage options made it to the table definitions -- verify that the storage options made it to the table definitions
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter'; SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
@ -65,7 +66,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;
-- \copy to verify that default values take effect -- \copy to verify that default values take effect
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
@ -80,7 +82,10 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
-- \copy should fail because it will try to insert NULLs for a NOT NULL column -- \copy should fail because it will try to insert NULLs for a NOT NULL column
-- Note, this operation will create a table on the workers but it won't be in the metadata -- Note, this operation will create a table on the workers but it won't be in the metadata
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' BEGIN;
SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
END;
-- Verify that DROP NOT NULL works -- Verify that DROP NOT NULL works
@ -88,7 +93,8 @@ ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
-- \copy should succeed now -- \copy should succeed now
\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
SELECT count(*) from lineitem_alter; SELECT count(*) from lineitem_alter;
-- Verify that SET DATA TYPE works -- Verify that SET DATA TYPE works
View File
@ -20,6 +20,8 @@ CREATE TABLE multi_append_table_to_shard_left
left_text TEXT not null left_text TEXT not null
); );
SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append');
SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid1 \gset
SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid2 \gset
CREATE TABLE multi_append_table_to_shard_right_reference_hash CREATE TABLE multi_append_table_to_shard_right_reference_hash
( (
@ -32,8 +34,8 @@ SELECT create_distributed_table('multi_append_table_to_shard_right_reference_has
-- Replicate 'left' table on both workers -- Replicate 'left' table on both workers
SELECT set_config('citus.shard_replication_factor', '2', false); SELECT set_config('citus.shard_replication_factor', '2', false);
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid1);
\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid2);
-- Place 'right' table on both workers -- Place 'right' table on both workers
\copy multi_append_table_to_shard_right_reference FROM '@abs_srcdir@/data/agg.data' \copy multi_append_table_to_shard_right_reference FROM '@abs_srcdir@/data/agg.data'
View File
@ -35,6 +35,9 @@ COPY customer_copy_hash (c_custkey,c_name) FROM STDIN;
notinteger,customernot notinteger,customernot
\. \.
-- Test invalid option
COPY customer_copy_hash (c_custkey,c_name) FROM STDIN (append_to_shard 1);
-- Confirm that no data was copied -- Confirm that no data was copied
SELECT count(*) FROM customer_copy_hash; SELECT count(*) FROM customer_copy_hash;
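The added negative test confirms that append_to_shard is rejected up front when the target table is not append-distributed (customer_copy_hash is hash-distributed). Schematically:

COPY customer_copy_hash (c_custkey, c_name) FROM STDIN (append_to_shard 1);
-- expected to error out: the option is presumed valid only for append-distributed tables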
@ -231,46 +234,55 @@ CREATE TABLE customer_copy_append (
c_acctbal decimal(15,2), c_acctbal decimal(15,2),
c_mktsegment char(10), c_mktsegment char(10),
c_comment varchar(117)); c_comment varchar(117));
SELECT master_create_distributed_table('customer_copy_append', 'c_custkey', 'append'); SELECT create_distributed_table('customer_copy_append', 'c_custkey', 'append');
-- Test syntax error -- Test syntax error
COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
1,customer1 1,customer1
2,customer2 2,customer2
notinteger,customernot notinteger,customernot
\. \.
END;
-- Test that no shard is created for failing copy -- Test that no shard is created for failing copy
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass;
-- Test empty copy -- Test empty copy
COPY customer_copy_append FROM STDIN; BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
COPY customer_copy_append FROM STDIN WITH (append_to_shard :shardid);
\. \.
END;
-- Test that no shard is created for copying zero rows -- Test that a shard is created
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass;
-- Test proper copy -- Test proper copy
COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); BEGIN;
SELECT master_create_empty_shard('customer_copy_append') AS shardid \gset
COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
1,customer1 1,customer1
2,customer2 2,customer2
\. \.
END;
-- Check whether data was copied properly -- Check whether data was copied properly
SELECT * FROM customer_copy_append; SELECT * FROM customer_copy_append;
-- Manipulate and check shard statistics for append-partitioned table shard -- Manipulate and check shard statistics for append-partitioned table shard
UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560131; UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560132;
UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560131; UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560132;
SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132;
SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132;
-- Update shard statistics for append-partitioned shard -- Update shard statistics for append-partitioned shard
SELECT master_update_shard_statistics(560131); SELECT master_update_shard_statistics(560132);
SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132;
SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132;
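The block above deliberately corrupts the shard's metadata and then verifies that master_update_shard_statistics() recomputes the min/max values and the placement length from the shard's actual contents. Consolidated from the interleaved diff, the new sequence is:

UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560132;
UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560132;
SELECT master_update_shard_statistics(560132);
SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560132;
SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560132;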
-- Create lineitem table
CREATE TABLE lineitem_copy_append (
@@ -290,33 +302,18 @@ CREATE TABLE lineitem_copy_append (
l_shipinstruct char(25) not null,
l_shipmode char(10) not null,
l_comment varchar(44) not null);
-SELECT master_create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append');
+SELECT create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append');
--- Test multiple shard creation
-SET citus.shard_max_size TO '256kB';
-COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|';
+BEGIN;
+SELECT master_create_empty_shard('lineitem_copy_append') AS shardid \gset
+COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
+END;
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass;
--- Test round robin shard policy
-SET citus.shard_replication_factor TO 1;
-COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|';
-SELECT
-    pg_dist_shard_placement.shardid,
-    pg_dist_shard_placement.nodeport
-FROM
-    pg_dist_shard,
-    pg_dist_shard_placement
-WHERE
-    pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND
-    logicalrelid = 'lineitem_copy_append'::regclass
-ORDER BY
-    pg_dist_shard.shardid DESC
-LIMIT
-    5;
+-- trigger some errors on the append_to_shard option
+COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 1);
+COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard 560000);
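The two COPY statements tagged "trigger some errors" pass deliberately bad shard ids: 1 does not exist, and 560000 belongs to a table other than lineitem_copy_append, so both loads should be rejected. A quick way to check which relation owns a given shard id, using the same catalog the tests query throughout (a sketch, not part of the test file):

SELECT shardid, logicalrelid FROM pg_dist_shard WHERE shardid = 560000;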
-- Test schema support on append partitioned tables
CREATE SCHEMA append;
@@ -330,11 +327,13 @@ CREATE TABLE append.customer_copy (
c_mktsegment char(10),
c_comment varchar(117));
-SELECT master_create_distributed_table('append.customer_copy', 'c_custkey', 'append');
+SELECT create_distributed_table('append.customer_copy', 'c_custkey', 'append');
+SELECT master_create_empty_shard('append.customer_copy') AS shardid1 \gset
+SELECT master_create_empty_shard('append.customer_copy') AS shardid2 \gset
-- Test copy from the master node
-COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|');
-COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|');
+COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard :shardid1);
+COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid2);
-- Test the content of the table
SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy;
@@ -421,8 +420,10 @@ CREATE TABLE packed_numbers_append (
packed_numbers number_pack[]
);
-SELECT master_create_distributed_table('packed_numbers_append', 'id', 'append');
+SELECT create_distributed_table('packed_numbers_append', 'id', 'append');
-COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite';
+SELECT master_create_empty_shard('packed_numbers_append') AS shardid \gset
+COPY packed_numbers_append FROM :'temp_dir''copy_test_array_of_composite' WITH (append_to_shard :shardid);
-- Verify data is actually copied
SELECT * FROM packed_numbers_append;
@@ -434,8 +435,10 @@ CREATE TABLE super_packed_numbers_append (
super_packed_number super_number_pack
);
-SELECT master_create_distributed_table('super_packed_numbers_append', 'id', 'append');
+SELECT create_distributed_table('super_packed_numbers_append', 'id', 'append');
-COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite';
+SELECT master_create_empty_shard('super_packed_numbers_append') AS shardid \gset
+COPY super_packed_numbers_append FROM :'temp_dir''copy_test_composite_of_composite' WITH (append_to_shard :shardid);
-- Verify data is actually copied
SELECT * FROM super_packed_numbers_append;
@@ -448,9 +451,10 @@ CREATE TABLE composite_partition_column_table(
composite_column number_pack
);
-SELECT master_create_distributed_table('composite_partition_column_table', 'composite_column', 'append');
+SELECT create_distributed_table('composite_partition_column_table', 'composite_column', 'append');
+SELECT master_create_empty_shard('composite_partition_column_table') AS shardid \gset
-\COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv');
+COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid);
1,"(1,1)"
2,"(2,2)"
\.
@@ -458,20 +462,22 @@ SELECT master_create_distributed_table('composite_partition_column_table', 'comp
-- Test that copy on append distributed tables does not create shards on removed workers
CREATE TABLE numbers_append (a int, b int);
-SELECT master_create_distributed_table('numbers_append', 'a', 'append');
+SELECT create_distributed_table('numbers_append', 'a', 'append');
-- no shards are created yet
SELECT shardid, nodename, nodeport
FROM pg_dist_shard_placement join pg_dist_shard using(shardid)
WHERE logicalrelid = 'numbers_append'::regclass order by placementid;
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
1,1
2,2
\.
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
3,5
4,6
\.
@@ -487,12 +493,15 @@ SELECT master_disable_node('localhost', :worker_1_port);
SET citus.shard_replication_factor TO 1;
-- add two new shards and verify they are created at the other node
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
5,7
6,8
\.
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
7,9
8,10
\.
@@ -507,12 +516,15 @@ SELECT 1 FROM master_activate_node('localhost', :worker_1_port);
RESET client_min_messages;
RESET citus.shard_replication_factor;
-- add two new shards and verify they are created at both workers
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+SELECT master_create_empty_shard('numbers_append') AS shardid1 \gset
+SELECT master_create_empty_shard('numbers_append') AS shardid2 \gset
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid1);
9,11
10,12
\.
-COPY numbers_append FROM STDIN WITH (FORMAT 'csv');
+COPY numbers_append FROM STDIN WITH (FORMAT 'csv', append_to_shard :shardid2);
11,13
12,14
\.
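These blocks work because master_create_empty_shard chooses placements at shard-creation time, from the currently active nodes and according to citus.shard_replication_factor; that is presumably why the factor is lowered to 1 while a worker is disabled. A sketch of verifying that behaviour directly (assumes the numbers_append table from the test):

SET citus.shard_replication_factor TO 1;
SELECT master_create_empty_shard('numbers_append') AS shardid \gset
-- with replication factor 1, exactly one placement should exist
SELECT count(*) FROM pg_dist_shard_placement WHERE shardid = :shardid;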
@@ -625,10 +637,6 @@ SELECT shardid, shardstate, nodename, nodeport
\c - :default_user - :worker_1_port
ALTER USER test_user WITH login;
--- there is a dangling shard in worker_2, drop it
-\c - test_user - :worker_2_port
-DROP TABLE numbers_hash_other_560176;
\c - test_user - :master_port
DROP TABLE numbers_hash;
@@ -644,7 +652,7 @@ CREATE TABLE numbers_hash(a int, b int);
SELECT create_distributed_table('numbers_hash', 'a');
\c - - - :worker_1_port
-ALTER TABLE numbers_hash_560180 DROP COLUMN b;
+ALTER TABLE numbers_hash_560170 DROP COLUMN b;
\c - - - :master_port
-- operation will fail to modify a shard and roll back
View File
@@ -1,15 +0,0 @@
-SET citus.next_shard_id TO 250000;
-CREATE SCHEMA tpch
-CREATE TABLE nation (
-n_nationkey integer not null,
-n_name char(25) not null,
-n_regionkey integer not null,
-n_comment varchar(152));
-SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
-\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
-SELECT count(*) from tpch.nation;
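The file removed above relied on \copy implicitly creating a shard for tpch.nation, which is exactly the behaviour this commit deletes. Had the test been kept instead, its load step would presumably follow the same two-step pattern used throughout this diff, along these lines:

SELECT master_create_empty_shard('tpch.nation') AS shardid \gset
\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with (delimiter '|', append_to_shard :shardid)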
View File
@@ -18,9 +18,9 @@ SET citus.next_shard_id TO 290000;
\copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
\copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
+\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)
\copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
-\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|'
+\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009)
\copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
\copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
View File
@@ -1,23 +0,0 @@
---
--- MULTI_STAGE_LARGE_RECORDS
---
--- Tests for loading data with large records (i.e. greater than the read buffer
--- size, which is 32kB) in a distributed cluster. These tests make sure that we
--- are creating shards of correct size even when records are large.
-SET citus.next_shard_id TO 300000;
-SET citus.shard_max_size TO "256kB";
-CREATE TABLE large_records_table (data_id integer, data text);
-SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
-\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
-SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
-WHERE pg_class.oid=logicalrelid AND relname='large_records_table'
-ORDER BY shardid;
-RESET citus.shard_max_size;
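This deleted test only made sense while COPY split oversized input into multiple shards based on citus.shard_max_size. With the append_to_shard option, all copied rows land in the single named shard and that setting no longer drives shard boundaries during COPY, so there is nothing left to verify. The equivalent explicit load would look roughly like:

SELECT master_create_empty_shard('large_records_table') AS shardid \gset
\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with (delimiter '|', append_to_shard :shardid)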
View File
@@ -14,9 +14,15 @@ SET citus.next_shard_id TO 280000;
\copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
\copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
-\copy part_append FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
+SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
+SELECT master_create_empty_shard('customer_append') AS shardid2 \gset
+copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1);
+copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2);
+SELECT master_create_empty_shard('part_append') AS shardid \gset
+copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid);
-- Exchange partition files in binary format in remaining tests
ALTER SYSTEM SET citus.binary_worker_copy_format TO on;
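Nothing limits this pattern to two files; each additional input just repeats the shard-then-copy pair, since one COPY now fills exactly one shard. For instance (the shardid3 variable and customer.4.data file are hypothetical, not in the test):

SELECT master_create_empty_shard('customer_append') AS shardid3 \gset
copy customer_append FROM '@abs_srcdir@/data/customer.4.data' with (delimiter '|', append_to_shard :shardid3);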
View File
@@ -44,7 +44,6 @@ test: isolation_create_distributed_table isolation_master_append_table
test: isolation_multi_shard_modify_vs_all
test: isolation_modify_with_subquery_vs_dml
test: isolation_hash_copy_vs_all
-test: isolation_append_copy_vs_all
test: isolation_range_copy_vs_all
test: isolation_partitioned_copy_vs_all
test: isolation_select_vs_all
View File
@@ -127,12 +127,6 @@ test: with_modifying cte_prepared_modify cte_nested_modification
test: ensure_no_intermediate_data_leak
test: with_executors with_join with_partitioning with_transactions with_dml
-# ----------
-# Tests to check our large record loading and shard deletion behavior
-# ----------
-test: multi_load_large_records
# ----------
# Tests around DDL statements run on distributed tables
# ----------
@@ -140,12 +134,6 @@ test: multi_index_statements
test: multi_alter_table_statements
test: multi_alter_table_add_constraints
-# ----------
-# multi_create_schema tests creation, loading, and querying of a table in a new
-# schema (namespace).
-# ----------
-test: multi_create_schema
# ----------
# Tests to check if we inform the user about potential caveats of creating new
# databases, schemas, roles, and authentication information.
View File
@@ -50,7 +50,7 @@ test: set_operation_and_local_tables
test: subqueries_deep subquery_view subquery_partitioning subqueries_not_supported
test: subquery_in_targetlist subquery_in_where subquery_complex_target_list subquery_append
test: subquery_prepared_statements
-test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
+test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins
test: cte_inline recursive_view_local_table values
test: pg13 pg12
# run pg14 sequentially as it syncs metadata
View File
@@ -53,7 +53,6 @@ test: insert_select_connection_leak
# ----------
test: subquery_basics subquery_local_tables subquery_executors set_operations set_operation_and_local_tables
test: subquery_partitioning subquery_complex_target_list subqueries_not_supported
-test: non_colocated_join_order
test: subquery_prepared_statements pg12 cte_inline
# ----------
@@ -114,8 +113,6 @@ test: with_executors with_partitioning with_dml
# ----------
# Tests to check our large record loading and shard deletion behavior
# ----------
-test: multi_load_large_records
-test: multi_master_delete_protocol
test: multi_shard_modify
# ----------
View File
@@ -59,7 +59,7 @@ test: multi_partitioning_utils
# ----------
test: subquery_local_tables subquery_executors subquery_and_cte set_operations set_operation_and_local_tables
test: subqueries_deep subquery_view subquery_partitioning subqueries_not_supported subquery_in_where
-test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins non_colocated_join_order
+test: non_colocated_leaf_subquery_joins non_colocated_subquery_joins
test: subquery_prepared_statements pg12 cte_inline
# ----------
@@ -122,8 +122,6 @@ test: with_executors with_partitioning with_dml
# ----------
# Tests to check our large record loading and shard deletion behavior
# ----------
-test: multi_load_large_records
-test: multi_master_delete_protocol
test: multi_shard_modify
# ----------
@@ -131,12 +129,6 @@ test: multi_shard_modify
# ----------
test: multi_alter_table_add_constraints
-# ----------
-# multi_create_schema tests creation, loading, and querying of a table in a new
-# schema (namespace).
-# ----------
-test: multi_create_schema
# ----------
# Tests to check the sequential and parallel executions of DDL and modification
# commands
View File
@@ -39,7 +39,8 @@ SELECT create_distributed_table('aggregate_type', 'float_value', 'append');
(1 row)
-\copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data'
+SELECT master_create_empty_shard('aggregate_type') AS shardid \gset
+copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' with (append_to_shard :shardid);
-- Test conversions using aggregates on floats and division
SELECT min(float_value), max(float_value),
sum(float_value), count(float_value), avg(float_value)
View File
@@ -26,22 +26,23 @@ CREATE TABLE lineitem_alter (
WITH ( fillfactor = 80 );
SELECT create_distributed_table('lineitem_alter', 'l_orderkey', 'append');
create_distributed_table
----------------------------------------------------------------------
+--------------------------
(1 row)
-\copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+SELECT master_create_empty_shard('lineitem_alter') AS shardid \gset
+copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
-- verify that the storage options made it to the table definitions
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
relname | reloptions
----------------------------------------------------------------------
+----------------+-----------------
lineitem_alter | {fillfactor=80}
(1 row)
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' ORDER BY relname;
relname | reloptions
----------------------------------------------------------------------
+-----------------------+-----------------
lineitem_alter_220000 | {fillfactor=80}
(1 row)
@@ -60,7 +61,7 @@ FROM
JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
ORDER BY attnum;
attname | atttypid
----------------------------------------------------------------------
+-----------------+-------------------
tableoid | oid
cmax | cid
xmax | xid
@@ -93,7 +94,7 @@ ORDER BY attnum;
\c - - - :master_port
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+-----------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -119,13 +120,13 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
float_column | count
----------------------------------------------------------------------
+--------------+-------
| 6000
(1 row)
SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
int_column1 | count
----------------------------------------------------------------------
+-------------+-------
1 | 6000
(1 row)
@@ -133,17 +134,18 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1;
ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT;
-- \copy to verify that default values take effect
-\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
+copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column;
float_column | count
----------------------------------------------------------------------
+--------------+-------
| 6000
1 | 6000
(2 rows)
SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
int_column1 | count
----------------------------------------------------------------------
+-------------+-------
| 6000
1 | 6000
(2 rows)
@@ -152,7 +154,7 @@ SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1;
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+--------------------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -180,14 +182,17 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT;
-- \copy should fail because it will try to insert NULLs for a NOT NULL column
-- Note, this operation will create a table on the workers but it won't be in the metadata
-\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
-ERROR: null value in column "int_column2" violates not-null constraint
+BEGIN;
+SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
+copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
+ERROR: null value in column "int_column2" of relation "lineitem_alter_220002" violates not-null constraint
DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null).
+END;
-- Verify that DROP NOT NULL works
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+-----------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -212,17 +217,18 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite
(21 rows)
-- \copy should succeed now
-\copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'
+SELECT master_create_empty_shard('lineitem_alter') as shardid \gset
+copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', append_to_shard :shardid);
SELECT count(*) from lineitem_alter;
count
----------------------------------------------------------------------
+-------
18000
(1 row)
-- Verify that SET DATA TYPE works
SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2;
int_column2 | pg_typeof | count
----------------------------------------------------------------------
+-------------+-----------+-------
| integer | 6000
2 | integer | 12000
(2 rows)
@@ -230,7 +236,7 @@ SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP B
ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+-----------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -256,7 +262,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite
SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2;
int_column2 | pg_typeof | count
----------------------------------------------------------------------
+-------------+------------------+-------
| double precision | 6000
2 | double precision | 12000
(2 rows)
@@ -269,7 +275,7 @@ ALTER TABLE lineitem_alter DROP COLUMN date_column;
ALTER TABLE lineitem_alter RENAME COLUMN l_orderkey TO l_orderkey_renamed;
SELECT SUM(l_orderkey_renamed) FROM lineitem_alter;
sum
----------------------------------------------------------------------
+----------
53620791
(1 row)
@@ -288,13 +294,13 @@ ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2;
ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey;
SELECT SUM(l_orderkey) FROM lineitem_alter;
sum
----------------------------------------------------------------------
+----------
53620791
(1 row)
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+-----------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -319,7 +325,7 @@ ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER,
ADD COLUMN int_column2 INTEGER;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+-----------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -348,7 +354,7 @@ DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VAL
ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+-----------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -386,11 +392,13 @@ DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VAL
-- types
ALTER TABLE lineitem_alter ADD COLUMN new_column non_existent_type;
ERROR: type "non_existent_type" does not exist
+LINE 1: ALTER TABLE lineitem_alter ADD COLUMN new_column non_existen...
+^
ALTER TABLE lineitem_alter ALTER COLUMN null_column SET NOT NULL;
-ERROR: column "null_column" contains null values
-CONTEXT: while executing command on localhost:xxxxx
+ERROR: column "null_column" of relation "lineitem_alter_220000" contains null values
+CONTEXT: while executing command on localhost:57637
ALTER TABLE lineitem_alter ALTER COLUMN l_partkey SET DEFAULT 'a';
-ERROR: invalid input syntax for integer: "a"
+ERROR: invalid input syntax for type integer: "a"
-- Verify that we error out on RENAME CONSTRAINT statement
ALTER TABLE lineitem_alter RENAME CONSTRAINT constraint_a TO constraint_b;
ERROR: renaming constraints belonging to distributed tables is currently unsupported
@@ -432,7 +440,7 @@ CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber);
COMMIT;
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+--------------+----------------
temp_index_1 | lineitem_alter
(1 row)
@@ -444,7 +452,7 @@ CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
COMMIT;
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+--------------+----------------
temp_index_2 | lineitem_alter
(1 row)
@@ -456,7 +464,7 @@ ALTER TABLE lineitem_alter ADD COLUMN first integer;
COMMIT;
SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass;
Column | Type | Modifiers
----------------------------------------------------------------------
+-----------------+-----------------------+-----------
l_orderkey | bigint | not null
l_partkey | integer | not null
l_suppkey | integer | not null
@@ -480,7 +488,7 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineite
SELECT "Column", "Type", "Definition" FROM index_attrs WHERE
relid = 'temp_index_2'::regclass;
Column | Type | Definition
----------------------------------------------------------------------
+------------+--------+------------
l_orderkey | bigint | l_orderkey
(1 row)
@@ -493,7 +501,7 @@ CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey);
ROLLBACK;
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+-----------+-----------
(0 rows)
-- ensure that errors cause full rollback
@@ -504,7 +512,7 @@ ERROR: relation "temp_index_2" already exists
ROLLBACK;
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+-----------+-----------
(0 rows)
-- verify that SAVEPOINT is allowed...
@@ -522,7 +530,7 @@ ROLLBACK TO my_savepoint;
COMMIT;
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+--------------+----------------
temp_index_2 | lineitem_alter
(1 row)
@@ -536,12 +544,12 @@ BEGIN;
CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
ALTER TABLE lineitem_alter ADD COLUMN first integer;
ERROR: column "first" of relation "lineitem_alter_220000" already exists
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:57638
COMMIT;
-- Nothing from the block should have committed
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+-----------+-----------
(0 rows)
-- Create single-shard table (to avoid deadlocks in the upcoming test hackery)
@@ -550,7 +558,7 @@ SET citus.shard_count TO 1;
SET citus.shard_replication_factor TO 2;
SELECT create_distributed_table('single_shard_items', 'id', 'hash');
create_distributed_table
----------------------------------------------------------------------
+--------------------------
(1 row)
@@ -558,13 +566,13 @@ SELECT create_distributed_table('single_shard_items', 'id', 'hash');
CREATE UNIQUE INDEX replica_idx on single_shard_items(id);
SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items';
relreplident
----------------------------------------------------------------------
+--------------
d
(1 row)
SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;');
run_command_on_workers
----------------------------------------------------------------------
+------------------------
(localhost,57637,t,d)
(localhost,57638,t,d)
(2 rows)
@@ -572,13 +580,13 @@ SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname L
ALTER TABLE single_shard_items REPLICA IDENTITY nothing;
SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items';
relreplident
----------------------------------------------------------------------
+--------------
n
(1 row)
SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;');
run_command_on_workers
----------------------------------------------------------------------
+------------------------
(localhost,57637,t,n)
(localhost,57638,t,n)
(2 rows)
@@ -586,13 +594,13 @@ SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname L
ALTER TABLE single_shard_items REPLICA IDENTITY full;
SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items';
relreplident
----------------------------------------------------------------------
+--------------
f
(1 row)
SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;');
run_command_on_workers
----------------------------------------------------------------------
+------------------------
(localhost,57637,t,f)
(localhost,57638,t,f)
(2 rows)
@@ -600,13 +608,13 @@ SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname L
ALTER TABLE single_shard_items REPLICA IDENTITY USING INDEX replica_idx;
SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items';
relreplident
----------------------------------------------------------------------
+--------------
i
(1 row)
SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;');
run_command_on_workers
----------------------------------------------------------------------
+------------------------
(localhost,57637,t,i)
(localhost,57638,t,i)
(2 rows)
@@ -614,13 +622,13 @@ SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname L
ALTER TABLE single_shard_items REPLICA IDENTITY default, REPLICA IDENTITY USING INDEX replica_idx, REPLICA IDENTITY nothing;
SELECT relreplident FROM pg_class WHERE relname = 'single_shard_items';
relreplident
----------------------------------------------------------------------
+--------------
n
(1 row)
SELECT run_command_on_workers('SELECT relreplident FROM pg_class WHERE relname LIKE ''single_shard_items_%'' LIMIT 1;');
run_command_on_workers
----------------------------------------------------------------------
+------------------------
(localhost,57637,t,n)
(localhost,57638,t,n)
(2 rows)
@@ -650,11 +658,11 @@ CREATE INDEX single_index_3 ON single_shard_items(name);
COMMIT;
ERROR: duplicate key value violates unique constraint "ddl_commands_command_key"
DETAIL: Key (command)=(CREATE INDEX) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:57638
-- Nothing from the block should have committed
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1;
indexname | tablename
----------------------------------------------------------------------
+-----------+-----------
(0 rows)
-- Even if 1PC is picked for multi-shard commands
@@ -666,11 +674,11 @@ CREATE INDEX single_index_3 ON single_shard_items(name);
COMMIT;
ERROR: duplicate key value violates unique constraint "ddl_commands_command_key"
DETAIL: Key (command)=(CREATE INDEX) already exists.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:57638
-- Nothing from the block should have committed
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1;
indexname | tablename
----------------------------------------------------------------------
+-----------+-----------
(0 rows)
\c - - - :worker_2_port
@@ -683,7 +691,7 @@ BEGIN;
CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
SELECT count(*) FROM lineitem_alter;
count
----------------------------------------------------------------------
+-------
18000
(1 row)
@@ -692,7 +700,7 @@ ROLLBACK;
BEGIN;
SELECT count(*) FROM lineitem_alter;
count
----------------------------------------------------------------------
+-------
18000
(1 row)
@@ -700,7 +708,7 @@ CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey);
COMMIT;
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+--------------+----------------
temp_index_2 | lineitem_alter
(1 row)
@@ -710,14 +718,14 @@ SET citus.multi_shard_commit_protocol TO '2pc';
CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey);
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+--------------+----------------
temp_index_3 | lineitem_alter
(1 row)
DROP INDEX temp_index_3;
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+-----------+-----------
(0 rows)
RESET citus.multi_shard_commit_protocol;
@@ -726,7 +734,7 @@ CREATE TABLE test_ab (a int, b int);
SET citus.shard_count TO 8;
SELECT create_distributed_table('test_ab', 'a', 'hash');
create_distributed_table
----------------------------------------------------------------------
+--------------------------
(1 row)
@@ -735,11 +743,11 @@ INSERT INTO test_ab VALUES (2, 11);
CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a);
ERROR: could not create unique index "temp_unique_index_1_220011"
DETAIL: Key (a)=(2) is duplicated.
-CONTEXT: while executing command on localhost:xxxxx
+CONTEXT: while executing command on localhost:57638
SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard
WHERE logicalrelid='test_ab'::regclass AND shardstate=3;
shardid
----------------------------------------------------------------------
+---------
(0 rows)
-- Check that the schema on the worker still looks reasonable
@@ -750,7 +758,7 @@ FROM
JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid)
ORDER BY attnum;
attname | atttypid
----------------------------------------------------------------------
+-------------------------------+-------------------
tableoid | oid
cmax | cid
xmax | xid
@@ -787,7 +795,7 @@ ORDER BY attnum;
-- verify that we can rename distributed tables
SHOW citus.enable_ddl_propagation;
citus.enable_ddl_propagation
----------------------------------------------------------------------
+------------------------------
on
(1 row)
@@ -795,7 +803,7 @@ ALTER TABLE lineitem_alter RENAME TO lineitem_renamed;
-- verify rename is performed
SELECT relname FROM pg_class WHERE relname = 'lineitem_renamed';
relname
----------------------------------------------------------------------
+------------------
lineitem_renamed
(1 row)
@@ -803,7 +811,7 @@ SELECT relname FROM pg_class WHERE relname = 'lineitem_renamed';
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_renamed%' ORDER BY relname;
relname
----------------------------------------------------------------------
+-------------------------
lineitem_renamed_220000
lineitem_renamed_220001
lineitem_renamed_220003
@@ -816,7 +824,7 @@ ALTER TABLE lineitem_renamed RENAME TO lineitem_alter;
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
relname
----------------------------------------------------------------------
+-----------------------
lineitem_alter_220000
lineitem_alter_220001
lineitem_alter_220003
@@ -827,14 +835,14 @@ SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <>
ALTER TABLE lineitem_alter SET(fillfactor=40);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
relname | reloptions
----------------------------------------------------------------------
+----------------+-----------------
lineitem_alter | {fillfactor=40}
(1 row)
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
relname | reloptions
----------------------------------------------------------------------
+-----------------------+-----------------
lineitem_alter_220000 | {fillfactor=40}
lineitem_alter_220001 | {fillfactor=40}
lineitem_alter_220003 | {fillfactor=40}
@@ -844,14 +852,14 @@ SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AN
ALTER TABLE lineitem_alter RESET(fillfactor);
SELECT relname, reloptions FROM pg_class WHERE relname = 'lineitem_alter';
relname | reloptions
----------------------------------------------------------------------
+----------------+------------
lineitem_alter |
(1 row)
\c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'lineitem_alter%' AND relname <> 'lineitem_alter_220002' /* failed copy trails */ ORDER BY relname;
relname | reloptions
----------------------------------------------------------------------
+-----------------------+------------
lineitem_alter_220000 |
lineitem_alter_220001 |
lineitem_alter_220003 |
@@ -864,7 +872,7 @@ ALTER INDEX temp_index_1 RENAME TO idx_lineitem_linenumber;
-- verify rename is performed
SELECT relname FROM pg_class WHERE relname = 'idx_lineitem_linenumber';
relname
----------------------------------------------------------------------
+-------------------------
idx_lineitem_linenumber
(1 row)
@@ -872,7 +880,7 @@ SELECT relname FROM pg_class WHERE relname = 'idx_lineitem_linenumber';
\c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'idx_lineitem_linenumber%' ORDER BY relname;
relname
----------------------------------------------------------------------
+--------------------------------
idx_lineitem_linenumber_220000
idx_lineitem_linenumber_220001
idx_lineitem_linenumber_220003
@@ -888,7 +896,7 @@ ALTER TABLE lineitem_alter RENAME TO lineitem_renamed;
-- verify rename is performed
SELECT relname FROM pg_class WHERE relname = 'lineitem_alter' or relname = 'lineitem_renamed';
relname
----------------------------------------------------------------------
+------------------
lineitem_renamed
(1 row)
@@ -900,13 +908,15 @@ ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int;
\c - - - :worker_1_port
SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0;
ERROR: column "column_only_added_to_master" does not exist
+LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_22000...
+^
\c - - - :master_port
-- ddl propagation flag is reset to default, disable it again
SET citus.enable_ddl_propagation to false;
-- following query succeeds since it accesses a previously existing column
SELECT l_orderkey FROM lineitem_alter LIMIT 0;
l_orderkey
----------------------------------------------------------------------
+------------
(0 rows)
-- make master and workers have the same schema again
@@ -914,7 +924,7 @@ ALTER TABLE lineitem_alter DROP COLUMN column_only_added_to_master;
-- now this should succeed
SELECT * FROM lineitem_alter LIMIT 0;
l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment | null_column
----------------------------------------------------------------------
+------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+----------------+------------+-----------+-------------
(0 rows)
-- previously unsupported statements are accepted by postgresql now
@@ -930,7 +940,7 @@ ERROR: cannot execute ALTER TABLE command dropping partition column
CREATE UNIQUE INDEX unique_lineitem_partkey on lineitem_alter(l_partkey);
SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
indexname | tablename
----------------------------------------------------------------------
+-------------------------+----------------
unique_lineitem_partkey | lineitem_alter
(1 row)
@@ -938,7 +948,7 @@ SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter';
\c - - - :worker_1_port
SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%';
indexname | tablename
----------------------------------------------------------------------
+-----------+-----------
(0 rows)
\c - - - :master_port
@@ -948,7 +958,7 @@ SET citus.shard_replication_factor TO 2;
CREATE TABLE sequence_deadlock_test (a serial, b serial);
SELECT create_distributed_table('sequence_deadlock_test', 'a');
create_distributed_table
----------------------------------------------------------------------
+--------------------------
(1 row)
@@ -969,7 +979,7 @@ CREATE TABLE trigger_table (
);
SELECT create_distributed_table('trigger_table', 'id');
create_distributed_table
----------------------------------------------------------------------
+--------------------------
(1 row) (1 row)
@ -988,7 +998,7 @@ FOR EACH ROW EXECUTE PROCEDURE update_value();
INSERT INTO trigger_table VALUES (1, 'trigger disabled'); INSERT INTO trigger_table VALUES (1, 'trigger disabled');
SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value;
value | count value | count
--------------------------------------------------------------------- -----------------+-------
trigger enabled | 1 trigger enabled | 1
(1 row) (1 row)
@ -997,7 +1007,7 @@ ERROR: triggers are only supported for local tables added to metadata
INSERT INTO trigger_table VALUES (1, 'trigger disabled'); INSERT INTO trigger_table VALUES (1, 'trigger disabled');
SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value;
value | count value | count
--------------------------------------------------------------------- -----------------+-------
trigger enabled | 2 trigger enabled | 2
(1 row) (1 row)
@ -1006,7 +1016,7 @@ ERROR: triggers are only supported for local tables added to metadata
INSERT INTO trigger_table VALUES (1, 'trigger disabled'); INSERT INTO trigger_table VALUES (1, 'trigger disabled');
SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value;
value | count value | count
--------------------------------------------------------------------- -----------------+-------
trigger enabled | 3 trigger enabled | 3
(1 row) (1 row)
@ -1029,7 +1039,7 @@ DROP TABLE lineitem_alter;
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%';
relname relname
--------------------------------------------------------------------- -----------------------
lineitem_alter_220002 lineitem_alter_220002
(1 row) (1 row)
@ -1039,7 +1049,7 @@ BEGIN;
CREATE TABLE test_table_1(id int); CREATE TABLE test_table_1(id int);
SELECT create_distributed_table('test_table_1','id'); SELECT create_distributed_table('test_table_1','id');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- --------------------------
(1 row) (1 row)
@ -1050,7 +1060,7 @@ END;
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%'; SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%';
relname relname
--------------------------------------------------------------------- ---------
(0 rows) (0 rows)
\c - - - :master_port \c - - - :master_port
@ -1059,14 +1069,14 @@ CREATE TABLE logged_test(id int);
ALTER TABLE logged_test SET UNLOGGED; ALTER TABLE logged_test SET UNLOGGED;
SELECT create_distributed_table('logged_test', 'id'); SELECT create_distributed_table('logged_test', 'id');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- --------------------------
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
relname | logged_info relname | logged_info
--------------------------------------------------------------------- --------------------+-------------
logged_test_220022 | unlogged logged_test_220022 | unlogged
logged_test_220023 | unlogged logged_test_220023 | unlogged
logged_test_220024 | unlogged logged_test_220024 | unlogged
@ -1078,14 +1088,14 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg
ALTER TABLE logged_test SET LOGGED; ALTER TABLE logged_test SET LOGGED;
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
relname | logged_info relname | logged_info
--------------------------------------------------------------------- -------------+-------------
logged_test | logged logged_test | logged
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
relname | logged_info relname | logged_info
--------------------------------------------------------------------- --------------------+-------------
logged_test_220022 | logged logged_test_220022 | logged
logged_test_220023 | logged logged_test_220023 | logged
logged_test_220024 | logged logged_test_220024 | logged
@ -1096,14 +1106,14 @@ SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logg
ALTER TABLE logged_test SET UNLOGGED; ALTER TABLE logged_test SET UNLOGGED;
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
relname | logged_info relname | logged_info
--------------------------------------------------------------------- -------------+-------------
logged_test | unlogged logged_test | unlogged
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname; SELECT relname, CASE relpersistence WHEN 'u' THEN 'unlogged' WHEN 'p' then 'logged' ELSE 'unknown' END AS logged_info FROM pg_class WHERE relname ~ 'logged_test*' ORDER BY relname;
relname | logged_info relname | logged_info
--------------------------------------------------------------------- --------------------+-------------
logged_test_220022 | unlogged logged_test_220022 | unlogged
logged_test_220023 | unlogged logged_test_220023 | unlogged
logged_test_220024 | unlogged logged_test_220024 | unlogged
@ -1116,21 +1126,21 @@ DROP TABLE logged_test;
CREATE TABLE hash_dist(id bigint primary key, f1 text) WITH (fillfactor=40); CREATE TABLE hash_dist(id bigint primary key, f1 text) WITH (fillfactor=40);
SELECT create_distributed_table('hash_dist','id'); SELECT create_distributed_table('hash_dist','id');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- --------------------------
(1 row) (1 row)
-- verify that the storage options made it to the table definitions -- verify that the storage options made it to the table definitions
SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist'; SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist';
relname | reloptions relname | reloptions
--------------------------------------------------------------------- -----------+-----------------
hash_dist | {fillfactor=40} hash_dist | {fillfactor=40}
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname; SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'hash_dist%' ORDER BY relname;
relname | reloptions relname | reloptions
--------------------------------------------------------------------- ------------------+-----------------
hash_dist_220026 | {fillfactor=40} hash_dist_220026 | {fillfactor=40}
hash_dist_220027 | {fillfactor=40} hash_dist_220027 | {fillfactor=40}
hash_dist_220028 | {fillfactor=40} hash_dist_220028 | {fillfactor=40}
@ -1142,14 +1152,14 @@ SELECT relname, reloptions FROM pg_class WHERE relkind = 'r' AND relname LIKE 'h
ALTER INDEX hash_dist_pkey SET(fillfactor=40); ALTER INDEX hash_dist_pkey SET(fillfactor=40);
SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
relname | reloptions relname | reloptions
--------------------------------------------------------------------- ----------------+-----------------
hash_dist_pkey | {fillfactor=40} hash_dist_pkey | {fillfactor=40}
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname;
relname | reloptions relname | reloptions
--------------------------------------------------------------------- -----------------------+-----------------
hash_dist_pkey_220026 | {fillfactor=40} hash_dist_pkey_220026 | {fillfactor=40}
hash_dist_pkey_220027 | {fillfactor=40} hash_dist_pkey_220027 | {fillfactor=40}
hash_dist_pkey_220028 | {fillfactor=40} hash_dist_pkey_220028 | {fillfactor=40}
@ -1160,14 +1170,14 @@ SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' OR
ALTER INDEX hash_dist_pkey RESET(fillfactor); ALTER INDEX hash_dist_pkey RESET(fillfactor);
SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey'; SELECT relname, reloptions FROM pg_class WHERE relname = 'hash_dist_pkey';
relname | reloptions relname | reloptions
--------------------------------------------------------------------- ----------------+------------
hash_dist_pkey | hash_dist_pkey |
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname; SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'hash_dist_pkey%' ORDER BY relname;
relname | reloptions relname | reloptions
--------------------------------------------------------------------- -----------------------+------------
hash_dist_pkey_220026 | hash_dist_pkey_220026 |
hash_dist_pkey_220027 | hash_dist_pkey_220027 |
hash_dist_pkey_220028 | hash_dist_pkey_220028 |
@ -1184,14 +1194,14 @@ CREATE UNIQUE INDEX another_index ON hash_dist(id) WITH (fillfactor=50);
-- show the index and its storage options on coordinator, then workers -- show the index and its storage options on coordinator, then workers
SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index'; SELECT relname, reloptions FROM pg_class WHERE relname = 'another_index';
relname | reloptions relname | reloptions
--------------------------------------------------------------------- ---------------+-----------------
another_index | {fillfactor=50} another_index | {fillfactor=50}
(1 row) (1 row)
\c - - - :worker_1_port \c - - - :worker_1_port
SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname; SELECT relname, reloptions FROM pg_class WHERE relname LIKE 'another_index%' ORDER BY relname;
relname | reloptions relname | reloptions
--------------------------------------------------------------------- ----------------------+-----------------
another_index_220026 | {fillfactor=50} another_index_220026 | {fillfactor=50}
another_index_220027 | {fillfactor=50} another_index_220027 | {fillfactor=50}
another_index_220028 | {fillfactor=50} another_index_220028 | {fillfactor=50}
@ -1208,7 +1218,7 @@ SET citus.shard_replication_factor TO 1;
CREATE TABLE test_table_1(id int); CREATE TABLE test_table_1(id int);
SELECT create_distributed_table('test_table_1', 'id'); SELECT create_distributed_table('test_table_1', 'id');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- --------------------------
(1 row) (1 row)
@ -1225,7 +1235,7 @@ HINT: You can issue each command separately such as ALTER TABLE test_table_1 AD
CREATE TABLE reference_table(i int UNIQUE); CREATE TABLE reference_table(i int UNIQUE);
SELECT create_reference_table('reference_table'); SELECT create_reference_table('reference_table');
create_reference_table create_reference_table
--------------------------------------------------------------------- ------------------------
(1 row) (1 row)
@ -1241,7 +1251,7 @@ DROP TABLE reference_table;
CREATE TABLE referenced_table(i int UNIQUE); CREATE TABLE referenced_table(i int UNIQUE);
SELECT create_distributed_table('referenced_table', 'i'); SELECT create_distributed_table('referenced_table', 'i');
create_distributed_table create_distributed_table
--------------------------------------------------------------------- --------------------------
(1 row) (1 row)
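Apart from the result-separator realignments, the behavior these expected outputs verify is that storage options propagate from the coordinator relation to each of its shard relations. A condensed sketch of that pattern, assuming a cluster like the one the tests run on:

    CREATE TABLE hash_dist(id bigint PRIMARY KEY, f1 text) WITH (fillfactor=40);
    SELECT create_distributed_table('hash_dist', 'id');
    -- the reloptions appear on every hash_dist_<shardid> on the workers
    ALTER INDEX hash_dist_pkey SET (fillfactor=40);   -- propagated to each shard index
    ALTER INDEX hash_dist_pkey RESET (fillfactor);    -- and reset the same way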
View File
@@ -25,6 +25,8 @@ SELECT create_distributed_table('multi_append_table_to_shard_left', 'left_number
 (1 row)
+SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid1 \gset
+SELECT master_create_empty_shard('multi_append_table_to_shard_left') AS shardid2 \gset
 CREATE TABLE multi_append_table_to_shard_right_reference_hash
 (
 right_number INTEGER not null,
@@ -45,8 +47,8 @@ SELECT set_config('citus.shard_replication_factor', '2', false);
 2
 (1 row)
-\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
-\copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data'
+copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid1);
+copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' with (append_to_shard :shardid2);
 -- Place 'right' table on both workers
 \copy multi_append_table_to_shard_right_reference FROM '@abs_srcdir@/data/agg.data'
 -- Reset shard replication factor to ensure tasks will be assigned to both workers
@@ -81,7 +83,7 @@ SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage'
 FROM
 pg_dist_shard
 WHERE 'multi_append_table_to_shard_right_reference_hash'::regclass::oid = logicalrelid;
-ERROR: cannot append to shardId 230001
+ERROR: cannot append to shardId 230003
 DETAIL: We currently don't support appending to shards in hash-partitioned, reference and local tables
 -- Clean up after test
 DROP TABLE multi_append_table_to_shard_stage;
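This hunk shows the core of the new workflow: COPY no longer creates an append shard implicitly. The shard is created up front with master_create_empty_shard() and named explicitly through the new append_to_shard COPY option. A minimal sketch of the pattern (table name and data are illustrative):

    CREATE TABLE events(id bigint, payload text);
    SELECT create_distributed_table('events', 'id', 'append');
    -- create the target shard explicitly and capture its id in a psql variable
    SELECT master_create_empty_shard('events') AS shardid \gset
    COPY events FROM PROGRAM 'echo 1,hello' WITH (format 'csv', append_to_shard :shardid);

The ERROR above is expected output, only with a shifted shard id: appending to shards of hash-partitioned, reference, and local tables remains unsupported, as the DETAIL line states.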
File diff suppressed because it is too large

View File
@@ -1,20 +0,0 @@
-SET citus.next_shard_id TO 250000;
-CREATE SCHEMA tpch
-CREATE TABLE nation (
-n_nationkey integer not null,
-n_name char(25) not null,
-n_regionkey integer not null,
-n_comment varchar(152));
-SELECT create_distributed_table('tpch.nation', 'n_nationkey', 'append');
-create_distributed_table
---------------------------
-(1 row)
-\copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
-SELECT count(*) from tpch.nation;
-count
--------
-25
-(1 row)
View File
@@ -13,9 +13,9 @@ SET citus.next_shard_id TO 290000;
 \copy orders_reference FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|'
 \copy orders_reference FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|'
 \copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|'
+\copy customer_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', append_to_shard 360006)
 \copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|'
 \copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|'
-\copy part_append FROM '@abs_srcdir@/data/part.data' with delimiter '|'
+\copy part_append FROM '@abs_srcdir@/data/part.data' with (delimiter '|', append_to_shard 360009)
 \copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
 \copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|'
View File
@@ -1,26 +0,0 @@
---
--- MULTI_STAGE_LARGE_RECORDS
---
--- Tests for loading data with large records (i.e. greater than the read buffer
--- size, which is 32kB) in a distributed cluster. These tests make sure that we
--- are creating shards of correct size even when records are large.
-SET citus.next_shard_id TO 300000;
-SET citus.shard_max_size TO "256kB";
-CREATE TABLE large_records_table (data_id integer, data text);
-SELECT master_create_distributed_table('large_records_table', 'data_id', 'append');
-master_create_distributed_table
----------------------------------
-(1 row)
-\copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'
-SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class
-WHERE pg_class.oid=logicalrelid AND relname='large_records_table'
-ORDER BY shardid;
-shardminvalue | shardmaxvalue
----------------+---------------
-1 | 1
-2 | 2
-(2 rows)
-RESET citus.shard_max_size;
View File
@@ -8,9 +8,12 @@ SET citus.next_shard_id TO 280000;
 \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
 \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
 \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|'
-\copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|'
-\copy part_append FROM '@abs_srcdir@/data/part.more.data' with delimiter '|'
+SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
+SELECT master_create_empty_shard('customer_append') AS shardid2 \gset
+copy customer_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', append_to_shard :shardid1);
+copy customer_append FROM '@abs_srcdir@/data/customer.3.data' with (delimiter '|', append_to_shard :shardid2);
+SELECT master_create_empty_shard('part_append') AS shardid \gset
+copy part_append FROM '@abs_srcdir@/data/part.more.data' with (delimiter '|', append_to_shard :shardid);
 -- Exchange partition files in binary format in remaining tests
 ALTER SYSTEM SET citus.binary_worker_copy_format TO on;
 SELECT pg_reload_conf();
@@ -21,14 +24,14 @@ SELECT pg_reload_conf();
 SELECT success FROM run_command_on_workers('ALTER SYSTEM SET citus.binary_worker_copy_format TO on');
 success
----------
+---------------------------------------------------------------------
 t
 t
 (2 rows)
 SELECT success FROM run_command_on_workers('SELECT pg_reload_conf()');
 success
----------
+---------------------------------------------------------------------
 t
 t
 (2 rows)
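Note why the loads above switch from the \copy meta-command to plain copy statements: psql performs no variable interpolation inside \copy arguments, so :shardid1 would be sent to the server literally. Plain COPY is ordinary SQL, and variables captured with \gset interpolate as usual, along these lines:

    SELECT master_create_empty_shard('customer_append') AS shardid1 \gset
    -- \copy ... with (append_to_shard :shardid1) would pass ':shardid1' through unexpanded
    COPY customer_append FROM '/path/to/customer.2.data' WITH (delimiter '|', append_to_shard :shardid1);

(Server-side COPY FROM a file needs superuser or pg_read_server_files; the regression suite runs with sufficient privileges.)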
View File
@@ -1,118 +0,0 @@
-//
-// How we organize this isolation test spec, is explained at README.md file in this directory.
-//
-// create append distributed table to test behavior of COPY in concurrent operations
-setup
-{
-SET citus.shard_replication_factor TO 1;
-CREATE TABLE append_copy(id integer, data text, int_data int);
-SELECT create_distributed_table('append_copy', 'id', 'append');
-}
-// drop distributed table
-teardown
-{
-DROP TABLE IF EXISTS append_copy CASCADE;
-}
-// session 1
-session "s1"
-step "s1-initialize" { COPY append_copy FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
-step "s1-begin" { BEGIN; }
-step "s1-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
-step "s1-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5 && echo 6, g, 6, 6 && echo 7, h, 7, 7 && echo 8, i, 8, 8 && echo 9, j, 9, 9' WITH CSV; }
-step "s1-router-select" { SELECT * FROM append_copy WHERE id = 1; }
-step "s1-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; }
-step "s1-adaptive-select"
-{
-SET citus.enable_repartition_joins TO ON;
-SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
-}
-step "s1-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); }
-step "s1-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; }
-step "s1-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; }
-step "s1-delete" { DELETE FROM append_copy WHERE id = 1; }
-step "s1-truncate" { TRUNCATE append_copy; }
-step "s1-drop" { DROP TABLE append_copy; }
-step "s1-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); }
-step "s1-ddl-drop-index" { DROP INDEX append_copy_index; }
-step "s1-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; }
-step "s1-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; }
-step "s1-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; }
-step "s1-table-size" { SELECT citus_total_relation_size('append_copy'); }
-step "s1-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
-step "s1-create-non-distributed-table" { CREATE TABLE append_copy(id integer, data text, int_data int); }
-step "s1-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); }
-step "s1-select-count" { SELECT COUNT(*) FROM append_copy; }
-step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); }
-step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); }
-step "s1-commit" { COMMIT; }
-// session 2
-session "s2"
-step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5 && echo 6, g, 6 && echo 7, h, 7 && echo 8, i, 8 && echo 9, j, 9' WITH CSV; }
-step "s2-router-select" { SELECT * FROM append_copy WHERE id = 1; }
-step "s2-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; }
-step "s2-adaptive-select"
-{
-SET citus.enable_repartition_joins TO ON;
-SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4;
-}
-step "s2-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); }
-step "s2-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; }
-step "s2-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; }
-step "s2-delete" { DELETE FROM append_copy WHERE id = 1; }
-step "s2-truncate" { TRUNCATE append_copy; }
-step "s2-drop" { DROP TABLE append_copy; }
-step "s2-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); }
-step "s2-ddl-drop-index" { DROP INDEX append_copy_index; }
-step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); }
-step "s2-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; }
-step "s2-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; }
-step "s2-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; }
-step "s2-table-size" { SELECT citus_total_relation_size('append_copy'); }
-step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); }
-step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); }
-// permutations - COPY vs COPY
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
-// permutations - COPY first
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-adaptive-select" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes"
-permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns"
-permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count"
-permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count"
-// permutations - COPY second
-permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-adaptive-select" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes"
-permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes"
-permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns"
-permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns"
-permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns"
-permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s2-copy" "s1-commit" "s1-select-count"
-permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count"
View File
@@ -1,12 +1,15 @@
 setup
 {
+SET citus.next_shard_id TO 4080102;
 CREATE TABLE table_to_append(id int);
 CREATE TABLE table_to_be_appended(id int);
 SELECT create_distributed_table('table_to_append', 'id', 'append');
+SELECT master_create_empty_shard('table_to_append');
 INSERT INTO table_to_be_appended SELECT generate_series(1,1000);
-COPY table_to_append FROM PROGRAM 'echo 0 && echo 7 && echo 8 && echo 9 && echo 10000';
+COPY table_to_append FROM PROGRAM 'echo 0 && echo 7 && echo 8 && echo 9 && echo 10000' WITH (append_to_shard 4080102);
 }
 teardown
View File
@@ -2,12 +2,17 @@
 // How we organize this isolation test spec, is explained at README.md file in this directory.
 //
-// create append distributed table to test behavior of COPY in concurrent operations
+// create range distributed table to test behavior of COPY in concurrent operations
 setup
 {
 SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 3004005;
 CREATE TABLE range_copy(id integer, data text, int_data int);
-SELECT create_distributed_table('range_copy', 'id', 'append');
+SELECT create_distributed_table('range_copy', 'id', 'range');
+SELECT master_create_empty_shard('range_copy');
+SELECT master_create_empty_shard('range_copy');
+UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
+UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
 }
 // drop distributed table
@@ -76,7 +81,7 @@ step "s2-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column;
 step "s2-table-size" { SELECT citus_total_relation_size('range_copy'); }
 step "s2-master-modify-multiple-shards" { DELETE FROM range_copy; }
 step "s2-master-drop-all-shards" { SELECT citus_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); }
-step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); }
+step "s2-distribute-table" {
+SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 3004005;
+SELECT create_distributed_table('range_copy', 'id', 'range');
+UPDATE pg_dist_shard SET shardminvalue = '0', shardmaxvalue = '4' WHERE shardid = 3004005;
+UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '9' WHERE shardid = 3004006;
+}
 // permutations - COPY vs COPY
 permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count"
View File
@@ -9,8 +9,10 @@ setup
 SELECT citus_internal.refresh_isolation_tester_prepared_statement();
 SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 6780300;
 CREATE TABLE select_append(id integer, data text, int_data int);
 SELECT create_distributed_table('select_append', 'id', 'append');
+SELECT master_create_empty_shard('select_append');
 }
 // drop distributed table
@@ -22,7 +24,7 @@ teardown
 // session 1
 session "s1"
-step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; }
+step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH (format 'csv', append_to_shard 6780300); }
 step "s1-begin" { BEGIN; }
 step "s1-disable-binary-protocol" {
View File
@@ -9,8 +9,10 @@ setup
 SELECT citus_internal.refresh_isolation_tester_prepared_statement();
 SET citus.shard_replication_factor TO 1;
+SET citus.next_shard_id TO 5990340;
 CREATE TABLE truncate_append(id integer, data text);
 SELECT create_distributed_table('truncate_append', 'id', 'append');
+SELECT master_create_empty_shard('truncate_append');
 }
 // drop distributed table
@@ -23,7 +25,7 @@ teardown
 // session 1
 session "s1"
-step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH CSV; }
+step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a && echo 1, b && echo 2, c && echo 3, d && echo 4, e' WITH (format 'csv', append_to_shard 5990340); }
 step "s1-begin" { BEGIN; }
 step "s1-truncate" { TRUNCATE truncate_append; }
 step "s1-drop" { DROP TABLE truncate_append; }
View File
@@ -10,10 +10,8 @@
 /multi_behavioral_analytics_create_table_superuser.sql
 /multi_complex_count_distinct.sql
 /multi_copy.sql
-/multi_create_schema.sql
 /multi_load_data.sql
 /multi_load_data_superuser.sql
-/multi_load_large_records.sql
 /multi_load_more_data.sql
 /multi_mx_copy_data.sql
 /multi_outer_join.sql
View File
@@ -63,10 +63,12 @@ ORDER BY 2, 3;
 -- here we update shardlength, shardminvalue and shardmaxvalue
 CREATE TABLE test_table_statistics_append (id int);
 SELECT create_distributed_table('test_table_statistics_append', 'id', 'append');
-COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH CSV;
-COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH CSV;
--- originally shardminvalue and shardmaxvalue will be 0,3 and 4, 7
+SELECT master_create_empty_shard('test_table_statistics_append') AS shardid1 \gset
+SELECT master_create_empty_shard('test_table_statistics_append') AS shardid2 \gset
+COPY test_table_statistics_append FROM PROGRAM 'echo 0 && echo 1 && echo 2 && echo 3' WITH (format 'csv', append_to_shard :shardid1);
+COPY test_table_statistics_append FROM PROGRAM 'echo 4 && echo 5 && echo 6 && echo 7' WITH (format 'csv', append_to_shard :shardid2);
+-- shardminvalue and shardmaxvalue are NULL
 SELECT
 ds.logicalrelid::regclass::text AS tablename,
 ds.shardid AS shardid,
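The replaced comment is the point of the change: COPY with append_to_shard no longer records shard min/max values, so a freshly loaded append shard has NULL statistics until they are refreshed explicitly, which is what this test exercises. Roughly, assuming the test drives the citus_update_table_statistics() UDF (consistent with the "here we update shardlength, shardminvalue and shardmaxvalue" comment):

    SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard
    WHERE logicalrelid = 'test_table_statistics_append'::regclass;
    -- NULL min/max for both shards right after the COPYs above
    SELECT citus_update_table_statistics('test_table_statistics_append');
    -- re-running the first query should now show populated values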
View File
@@ -85,7 +85,8 @@ FROM
 WHERE
 logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
 'sensors_2001'::regclass, 'sensors_2002'::regclass,
-'sensors_2003'::regclass, 'sensors_2004'::regclass);
+'sensors_2003'::regclass, 'sensors_2004'::regclass)
+ORDER BY 1,2;
 -- show that all the tables prune to the same shard for the same distribution key
 WITH
@@ -98,7 +99,7 @@ WITH
 all_shardids AS (SELECT * FROM sensors_shardid UNION SELECT * FROM sensors_2000_shardid UNION
 SELECT * FROM sensors_2001_shardid UNION SELECT * FROM sensors_2002_shardid
 UNION SELECT * FROM sensors_2003_shardid UNION SELECT * FROM sensors_2004_shardid)
-SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids);
+SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid IN (SELECT * FROM all_shardids) ORDER BY 1,2,3,4;
 VACUUM ANALYZE sensors, sensors_2000, sensors_2001, sensors_2002, sensors_2003;
@@ -190,7 +191,8 @@ FROM
 WHERE
 logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
 'sensors_2001'::regclass, 'sensors_2002'::regclass,
-'sensors_2003'::regclass, 'sensors_2004'::regclass);
+'sensors_2003'::regclass, 'sensors_2004'::regclass)
+ORDER BY 1,2;
 \c - - - :worker_1_port
 SET search_path TO drop_column_partitioned_table;
@@ -201,7 +203,8 @@ FROM
 WHERE
 logicalrelid IN ('sensors'::regclass, 'sensors_2000'::regclass,
 'sensors_2001'::regclass, 'sensors_2002'::regclass,
-'sensors_2003'::regclass, 'sensors_2004'::regclass);
+'sensors_2003'::regclass, 'sensors_2004'::regclass)
+ORDER BY 1,2;
 \c - - - :master_port
 SET client_min_messages TO WARNING;
View File
@@ -67,6 +67,7 @@ CREATE TABLE products_append (
 );
 SELECT create_distributed_table('products_append', 'product_no', 'append');
+SELECT master_create_empty_shard('products_append') AS shardid \gset
 -- Can only add primary key constraint on distribution column (or group
 -- of columns including distribution column)
@@ -75,7 +76,7 @@ ALTER TABLE products_append ADD CONSTRAINT p_key_name PRIMARY KEY(name);
 ALTER TABLE products_append ADD CONSTRAINT p_key PRIMARY KEY(product_no);
 --- Error out since first and third rows have the same product_no
-\COPY products_append FROM STDIN DELIMITER AS ',';
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 1, Product_1, 10
 2, Product_2, 15
 1, Product_3, 8
@@ -138,6 +139,7 @@ DROP TABLE unique_test_table_ref;
 -- Check "UNIQUE CONSTRAINT" with append table
 CREATE TABLE unique_test_table_append(id int, name varchar(20));
 SELECT create_distributed_table('unique_test_table_append', 'id', 'append');
+SELECT master_create_empty_shard('unique_test_table_append') AS shardid \gset
 -- Can only add unique constraint on distribution column (or group
 -- of columns including distribution column)
@@ -146,7 +148,7 @@ ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_name UNIQUE(name);
 ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_id UNIQUE(id);
 -- Error out. Table can not have two rows with the same id.
-\COPY unique_test_table_append FROM STDIN DELIMITER AS ',';
+COPY unique_test_table_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 1, Product_1
 2, Product_2
 1, Product_3
@@ -207,13 +209,14 @@ CREATE TABLE products_append (
 );
 SELECT create_distributed_table('products_append', 'product_no', 'append');
+SELECT master_create_empty_shard('products_append') AS shardid \gset
 -- Can add column and table check constraints
 ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0);
 ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price);
 -- Error out,since the third row conflicting with the p_multi_check
-\COPY products_append FROM STDIN DELIMITER AS ',';
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 1, Product_1, 10, 5
 2, Product_2, 15, 8
 1, Product_3, 8, 10
@@ -277,6 +280,7 @@ CREATE TABLE products_append (
 );
 SELECT create_distributed_table('products_append', 'product_no','append');
+SELECT master_create_empty_shard('products_append') AS shardid \gset
 -- Can only add exclusion constraint on distribution column (or group of column
 -- including distribution column)
@@ -285,7 +289,7 @@ ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name wi
 ALTER TABLE products_append ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =);
 -- Error out since first and third can not pass the exclusion check.
-\COPY products_append FROM STDIN DELIMITER AS ',';
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 1, Product_1, 10
 1, Product_2, 15
 1, Product_1, 8
@@ -335,11 +339,12 @@ CREATE TABLE products_append (
 );
 SELECT create_distributed_table('products_append', 'product_no', 'append');
+SELECT master_create_empty_shard('products_append') AS shardid \gset
 ALTER TABLE products_append ALTER COLUMN name SET NOT NULL;
 -- Error out since name and product_no columns can not handle NULL value.
-\COPY products_append FROM STDIN DELIMITER AS ',';
+COPY products_append FROM STDIN WITH (DELIMITER ',', append_to_shard :shardid);
 1, \N, 10
 \N, Product_2, 15
 1, Product_1, 8
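A note on why each of these COPY statements is expected to error out: constraints on a distributed table are enforced on the individual shard placements, and with append_to_shard the whole batch lands in a single shard, so conflicting rows in the same batch always meet in the same shard-level constraint. For the primary-key case, the effect is equivalent to this sketch:

    -- both rows go to the one shard named by :shardid, so the shard-level
    -- primary key on product_no rejects the second row and the COPY rolls back
    COPY products_append FROM PROGRAM 'echo 1,Product_1,10 && echo 1,Product_3,8'
        WITH (format 'csv', append_to_shard :shardid);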
View File
@@ -98,6 +98,7 @@ CREATE TABLE customer_append (
 c_mktsegment char(10) not null,
 c_comment varchar(117) not null);
 SELECT create_distributed_table('customer_append', 'c_custkey', 'append');
+SELECT master_create_empty_shard('customer_append');
 CREATE TABLE nation (
 n_nationkey integer not null,
@@ -130,6 +131,7 @@ CREATE TABLE part_append (
 p_retailprice decimal(15,2) not null,
 p_comment varchar(23) not null);
 SELECT create_distributed_table('part_append', 'p_partkey', 'append');
+SELECT master_create_empty_shard('part_append');
 CREATE TABLE supplier
 (
View File
@@ -4,24 +4,35 @@
 -- This test checks that we can handle null min/max values in shard statistics
 -- and that we don't partition or join prune shards that have null values.
+CREATE SCHEMA multi_null_minmax_value_pruning;
+SET search_path TO multi_null_minmax_value_pruning;
-SET client_min_messages TO DEBUG2;
 SET citus.explain_all_tasks TO on;
--- to avoid differing explain output - executor doesn't matter,
--- because were testing pruning here.
--- Change configuration to treat lineitem and orders tables as large
 SET citus.log_multi_join_order to true;
 SET citus.enable_repartition_joins to ON;
-SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000;
-SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001;
+SET citus.next_shard_id = 290000;
+CREATE TABLE lineitem (LIKE public.lineitem);
+SELECT create_distributed_table('lineitem', 'l_orderkey', 'range');
+SELECT master_create_empty_shard('lineitem') as lineitem_shardid1 \gset
+SELECT master_create_empty_shard('lineitem') as lineitem_shardid2 \gset
+CREATE TABLE orders (LIKE public.orders);
+SELECT create_distributed_table('orders', 'o_orderkey', 'range');
+SELECT master_create_empty_shard('orders') as orders_shardid1 \gset
+SELECT master_create_empty_shard('orders') as orders_shardid2 \gset
+SET client_min_messages TO DEBUG2;
+UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '6000' WHERE shardid = :lineitem_shardid1 OR shardid = :orders_shardid1;
+UPDATE pg_dist_shard SET shardminvalue = '6001', shardmaxvalue = '20000' WHERE shardid = :lineitem_shardid2 OR shardid = :orders_shardid2;
+UPDATE pg_dist_partition SET colocationid = 87091 WHERE logicalrelid = 'orders'::regclass OR logicalrelid = 'lineitem'::regclass;
 -- Check that partition and join pruning works when min/max values exist
 -- Adding l_orderkey = 1 to make the query not router executable
-SELECT coordinator_plan($Q$
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1;
 $Q$);
@@ -34,9 +45,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 -- partition or join pruning for the shard with null min value. Since it is not
 -- supported with single-repartition join, dual-repartition has been used.
-UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000;
+UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = :lineitem_shardid1;
-SELECT coordinator_plan($Q$
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
 $Q$);
@@ -49,9 +60,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 -- don't apply partition or join pruning for this other shard either. Since it
 -- is not supported with single-repartition join, dual-repartition has been used.
-UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001;
+UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = :lineitem_shardid2;
-SELECT coordinator_plan($Q$
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
 $Q$);
@@ -64,9 +75,9 @@ SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 -- should apply partition and join pruning for this shard now. Since it is not
 -- supported with single-repartition join, dual-repartition has been used.
-UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000;
+UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = :lineitem_shardid1;
-SELECT coordinator_plan($Q$
+SELECT public.coordinator_plan($Q$
 EXPLAIN (COSTS FALSE)
 SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030;
 $Q$);
@@ -75,9 +86,5 @@ EXPLAIN (COSTS FALSE)
 SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders
 WHERE l_partkey = o_custkey;
--- Set minimum and maximum values for two shards back to their original values
-UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000;
-UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid = 290001;
-SET client_min_messages TO NOTICE;
+RESET client_min_messages;
+DROP SCHEMA multi_null_minmax_value_pruning CASCADE;
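The reason this test toggles the bounds to NULL and back: shard pruning compares the filter value against shardminvalue/shardmaxvalue, and a NULL bound leaves the shard's interval unknown, so the planner has to keep that shard in every plan. Condensed from the statements above:

    UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = :lineitem_shardid1;
    EXPLAIN (COSTS FALSE)
    SELECT l_orderkey FROM lineitem WHERE l_orderkey = 9030;
    -- the NULL-bounded shard can no longer be pruned and shows up as an extra task
    UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = :lineitem_shardid1;
    -- with both bounds known again, the shard is pruned for l_orderkey = 9030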
View File
@@ -52,14 +52,14 @@ FROM
 orders, customer_append
 WHERE
 o_custkey = c_custkey AND
-c_custkey < 0;
+c_custkey < 0 AND c_custkey > 0;
 SELECT
 count(*)
 FROM
 orders, customer_append
 WHERE
 o_custkey = c_custkey AND
-c_custkey < 0;
+c_custkey < 0 AND c_custkey > 0;
 -- Dual hash-repartition join test case. Note that this query doesn't produce
 -- meaningful results and is only to test hash-partitioning of two large tables
View File
@@ -82,9 +82,10 @@ CREATE TABLE nation_append_search_path(
 n_regionkey integer not null,
 n_comment varchar(152)
 );
-SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
+SELECT create_distributed_table('nation_append_search_path', 'n_nationkey', 'append');
+SELECT master_create_empty_shard('nation_append_search_path') AS shardid \gset
-\copy nation_append_search_path FROM STDIN with delimiter '|';
+copy nation_append_search_path FROM STDIN with (delimiter '|', append_to_shard :shardid);
 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai
 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon
 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special
View File
@ -1,70 +0,0 @@
--
-- NON_COLOCATED_JOIN_ORDER
--
-- Tests to check placements of shards must be equal to choose local join logic.
CREATE TABLE test_table_1(id int, value_1 int);
SELECT master_create_distributed_table('test_table_1', 'id', 'append');
\copy test_table_1 FROM STDIN DELIMITER ','
1,2
2,3
3,4
\.
\copy test_table_1 FROM STDIN DELIMITER ','
5,2
6,3
7,4
\.
CREATE TABLE test_table_2(id int, value_1 int);
SELECT master_create_distributed_table('test_table_2', 'id', 'append');
\copy test_table_2 FROM STDIN DELIMITER ','
1,2
2,3
3,4
\.
\copy test_table_2 FROM STDIN DELIMITER ','
5,2
6,3
7,4
\.
SET citus.log_multi_join_order to TRUE;
SET client_min_messages to DEBUG1;
SET citus.enable_repartition_joins TO on;
-- when joining append tables we always get dual re-partition joins
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;
-- Add two shard placements of interval [8,10] to test_table_1
SET citus.shard_replication_factor to 2;
\copy test_table_1 FROM STDIN DELIMITER ','
8,2
9,3
10,4
\.
-- Add one shard placement of interval [8,10] to test_table_2
SET citus.shard_replication_factor to 1;
\copy test_table_2 FROM STDIN DELIMITER ','
8,2
9,3
10,4
\.
-- Although the shard intervals of the relations are the same, they have a different number
-- of placements for interval [8,10], so the repartition join logic will be triggered.
SET citus.enable_repartition_joins to ON;
SELECT count(*) FROM test_table_1, test_table_2 WHERE test_table_1.id = test_table_2.id;
SET client_min_messages TO default;
DROP TABLE test_table_1;
DROP TABLE test_table_2;

View File

@@ -365,6 +365,13 @@ BEGIN;
INSERT INTO test SELECT i,i FROM generate_series(0,100)i;
ROLLBACK;
+-- master_create_empty_shard on coordinator
+BEGIN;
+CREATE TABLE append_table (a INT, b INT);
+SELECT create_distributed_table('append_table','a','append');
+SELECT master_create_empty_shard('append_table');
+END;
-- alter table inside a tx block
BEGIN;
ALTER TABLE test ADD COLUMN z single_node.new_type;
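The BEGIN ... END block added above exercises master_create_empty_shard on a single-node coordinator inside a transaction block. One way to confirm the shard was recorded, as a sketch reusing the catalog queried elsewhere in these tests (the expected count is an assumption):

SELECT count(*) FROM pg_dist_shard
WHERE logicalrelid = 'append_table'::regclass; -- expect 1 after the block commits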

View File

@@ -5,14 +5,15 @@ CREATE TABLE append_table (key text, value int, extra int default 0);
CREATE INDEX ON append_table (key);
SELECT create_distributed_table('append_table', 'key', 'append');
-SELECT 1 FROM master_create_empty_shard('append_table');
-SELECT 1 FROM master_create_empty_shard('append_table');
+SELECT master_create_empty_shard('append_table') AS shardid1 \gset
+SELECT master_create_empty_shard('append_table') AS shardid2 \gset
+SELECT master_create_empty_shard('append_table') AS shardid3 \gset
CREATE TABLE ref_table (value int);
CREATE INDEX ON ref_table (value);
SELECT create_reference_table('ref_table');
-\COPY append_table (key,value) FROM STDIN WITH CSV
+COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid1);
abc,234
bcd,123
bcd,234
@@ -21,7 +22,7 @@ def,456
efg,234
\.
-\COPY append_table (key,value) FROM STDIN WITH CSV
+COPY append_table (key,value) FROM STDIN WITH (format 'csv', append_to_shard :shardid2);
abc,123
efg,123
hij,123
@@ -30,7 +31,7 @@ ijk,1
jkl,0
\.
-\COPY ref_table FROM STDIN WITH CSV
+COPY ref_table FROM STDIN WITH CSV;
123
234
345

View File

@@ -99,22 +99,24 @@ INSERT INTO t3 VALUES (3, 33);
SELECT * FROM t3 ORDER BY a;
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
-WHERE logicalrelid = 't_append'::regclass
+WHERE logicalrelid = 't_range'::regclass
ORDER BY shardminvalue, shardmaxvalue;
-SELECT * FROM t_append ORDER BY id;
+SELECT * FROM t_range ORDER BY id;
-\copy t_append FROM STDIN DELIMITER ','
+SELECT master_create_empty_shard('t_range') AS new_shard_id \gset
+UPDATE pg_dist_shard SET shardminvalue = '9', shardmaxvalue = '11' WHERE shardid = :new_shard_id;
+\copy t_range FROM STDIN with (DELIMITER ',')
9,2
10,3
11,4
\.
SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard
-WHERE logicalrelid = 't_append'::regclass
+WHERE logicalrelid = 't_range'::regclass
ORDER BY shardminvalue, shardmaxvalue;
-SELECT * FROM t_append ORDER BY id;
+SELECT * FROM t_range ORDER BY id;
ROLLBACK;
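As the hunk above shows, master_create_empty_shard does not assign bounds for a range-distributed table; the test sets them by updating pg_dist_shard directly. A minimal sketch of extending the covered range by one more shard (the bound values are an assumption):

SELECT master_create_empty_shard('t_range') AS new_shard_id2 \gset
UPDATE pg_dist_shard SET shardminvalue = '12', shardmaxvalue = '14'
WHERE shardid = :new_shard_id2;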

View File

@@ -50,16 +50,20 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex
RETURNS void
AS 'citus', $$master_create_worker_shards$$
LANGUAGE C STRICT;
-CREATE TABLE t_append(id int, value_1 int);
-SELECT master_create_distributed_table('t_append', 'id', 'append');
+CREATE TABLE t_range(id int, value_1 int);
+SELECT create_distributed_table('t_range', 'id', 'range');
+SELECT master_create_empty_shard('t_range') as shardid1 \gset
+SELECT master_create_empty_shard('t_range') as shardid2 \gset
+UPDATE pg_dist_shard SET shardminvalue = '1', shardmaxvalue = '3' WHERE shardid = :shardid1;
+UPDATE pg_dist_shard SET shardminvalue = '5', shardmaxvalue = '7' WHERE shardid = :shardid2;
-\copy t_append FROM STDIN DELIMITER ','
+\copy t_range FROM STDIN with (DELIMITER ',')
1,2
2,3
3,4
\.
-\copy t_append FROM STDIN DELIMITER ','
+\copy t_range FROM STDIN with (DELIMITER ',')
5,2
6,3
7,4