From 0e12d045b12a65190363207fc525a17f68c1a218 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Fri, 12 Jun 2020 15:02:51 +0200
Subject: [PATCH] Support use of binary protocol in between nodes (#3877)

This can save a lot of data to be sent in some cases, thus improving
performance for queries where inter-node bandwidth is the bottleneck.
There are some issues with enabling this by default, so it is currently
disabled by default.
---
 src/backend/distributed/commands/multi_copy.c |  32 +-
 .../distributed/connection/remote_commands.c  |   4 +-
 .../distributed/executor/adaptive_executor.c  | 349 ++++++++++++--
 .../distributed_intermediate_results.c        |   4 +-
 .../master/citus_create_restore_point.c       |   2 +-
 src/backend/distributed/shared_library_init.c |  11 +
 .../transaction/worker_transaction.c          |   2 +-
 src/include/distributed/adaptive_executor.h   |   1 +
 src/include/distributed/remote_commands.h     |   3 +-
 src/test/regress/bin/normalize.sed            |   3 +
 src/test/regress/expected/binary_protocol.out | 169 +++++++
 .../expected/isolation_select_vs_all.out      | 446 +++++++++++++++---
 .../isolation_select_vs_all_on_mx.out         |   9 +-
 .../multi_mx_function_call_delegation.out     |  11 +
 .../regress/spec/isolation_select_vs_all.spec |  11 +-
 .../spec/isolation_select_vs_all_on_mx.spec   |   7 +-
 src/test/regress/sql/binary_protocol.sql      |  84 ++++
 .../sql/multi_mx_function_call_delegation.sql |  10 +
 18 files changed, 1025 insertions(+), 133 deletions(-)
 create mode 100644 src/test/regress/expected/binary_protocol.out
 create mode 100644 src/test/regress/sql/binary_protocol.sql

diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c
index 6e871cd47..00f811824 100644
--- a/src/backend/distributed/commands/multi_copy.c
+++ b/src/backend/distributed/commands/multi_copy.c
@@ -217,6 +217,7 @@ static void OpenCopyConnectionsForNewShards(CopyStmt *copyStatement,
 											bool useBinaryCopyFormat);
 static List * RemoveOptionFromList(List *optionList, char *optionName);
 static bool BinaryOutputFunctionDefined(Oid typeId);
+static bool BinaryInputFunctionDefined(Oid typeId);
 static void SendCopyBinaryHeaders(CopyOutState copyOutState, int64 shardId,
 								  List *connectionList);
 static void SendCopyBinaryFooters(CopyOutState copyOutState, int64 shardId,
@@ -952,6 +953,11 @@ CanUseBinaryCopyFormatForType(Oid typeId)
 		return false;
 	}
 
+	if (!BinaryInputFunctionDefined(typeId))
+	{
+		return false;
+	}
+
 	if (typeId >= FirstNormalObjectId)
 	{
 		char typeCategory = '\0';
@@ -986,12 +992,28 @@ BinaryOutputFunctionDefined(Oid typeId)
 	get_type_io_data(typeId, IOFunc_send, &typeLength, &typeByVal,
 					 &typeAlign, &typeDelim, &typeIoParam, &typeFunctionId);
 
-	if (OidIsValid(typeFunctionId))
-	{
-		return true;
-	}
+	return OidIsValid(typeFunctionId);
+}
 
-	return false;
+
+/*
+ * BinaryInputFunctionDefined checks whether a binary input function is
+ * defined for the given type.
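+ *
+ * Not every type has one: for example the built-in aclitem type only has
+ * text I/O functions, which is why the regression tests in this change use
+ * it for the binaryless_builtin table.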
+ */ +static bool +BinaryInputFunctionDefined(Oid typeId) +{ + Oid typeFunctionId = InvalidOid; + Oid typeIoParam = InvalidOid; + int16 typeLength = 0; + bool typeByVal = false; + char typeAlign = 0; + char typeDelim = 0; + + get_type_io_data(typeId, IOFunc_receive, &typeLength, &typeByVal, + &typeAlign, &typeDelim, &typeIoParam, &typeFunctionId); + + return OidIsValid(typeFunctionId); } diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index e9e0570b3..84d3d4838 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -474,7 +474,7 @@ ExecuteOptionalRemoteCommand(MultiConnection *connection, const char *command, int SendRemoteCommandParams(MultiConnection *connection, const char *command, int parameterCount, const Oid *parameterTypes, - const char *const *parameterValues) + const char *const *parameterValues, bool binaryResults) { PGconn *pgConn = connection->pgConn; @@ -492,7 +492,7 @@ SendRemoteCommandParams(MultiConnection *connection, const char *command, Assert(PQisnonblocking(pgConn)); int rc = PQsendQueryParams(pgConn, command, parameterCount, parameterTypes, - parameterValues, NULL, NULL, 0); + parameterValues, NULL, NULL, binaryResults ? 1 : 0); return rc; } diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index a5467a93b..99b8edd01 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -129,6 +129,7 @@ #include "access/transam.h" #include "access/xact.h" +#include "access/htup_details.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/schemacmds.h" @@ -137,6 +138,7 @@ #include "distributed/citus_custom_scan.h" #include "distributed/citus_safe_lib.h" #include "distributed/connection_management.h" +#include "distributed/commands/multi_copy.h" #include "distributed/deparse_shard_query.h" #include "distributed/distributed_execution_locks.h" #include "distributed/listutils.h" @@ -163,9 +165,11 @@ #include "portability/instr_time.h" #include "storage/fd.h" #include "storage/latch.h" +#include "utils/builtins.h" #include "utils/int8.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/syscache.h" #include "utils/timestamp.h" #define SLOW_START_DISABLED 0 @@ -277,12 +281,13 @@ typedef struct DistributedExecution * The following fields are used while receiving results from remote nodes. * We store this information here to avoid re-allocating it every time. * - * columnArray field is reset/calculated per row, so might be useless for other - * contexts. The benefit of keeping it here is to avoid allocating the array - * over and over again. + * columnArray field is reset/calculated per row, so might be useless for + * other contexts. The benefit of keeping it here is to avoid allocating + * the array over and over again. 
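+	 *
+	 * When binary results are used, the elements of columnArray point into
+	 * stringInfoDataArray rather than at libpq-owned C strings, which is why
+	 * its element type is void * instead of char *.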
 	 */
 	uint32 allocatedColumnCount;
-	char **columnArray;
+	void **columnArray;
+	StringInfoData *stringInfoDataArray;
 
 	/*
 	 * jobIdList contains all jobs in the job tree, this is used to
@@ -437,6 +442,7 @@ struct TaskPlacementExecution;
 
 /* GUC, determining whether Citus opens 1 connection per task */
 bool ForceMaxQueryParallelization = false;
 int MaxAdaptiveExecutorPoolSize = 16;
+bool EnableBinaryProtocol = false;
 
 /* GUC, number of ms to wait between opening connections to the same worker */
 int ExecutorSlowStartInterval = 10;
@@ -478,6 +484,10 @@ typedef struct ShardCommandExecution
 	/* cached AttInMetadata for task */
 	AttInMetadata **attributeInputMetadata;
 
+	/* indicates whether the attributeInputMetadata has binary or text
+	 * encoding/decoding functions */
+	bool binaryResults;
+
 	/* order in which the command should be replicated on replicas */
 	PlacementExecutionOrder executionOrder;
@@ -632,6 +642,10 @@ static int RebuildWaitEventSet(DistributedExecution *execution);
 static void ProcessWaitEvents(DistributedExecution *execution, WaitEvent *events,
 							  int eventCount, bool *cancellationReceived);
 static long MillisecondsBetweenTimestamps(instr_time startTime, instr_time endTime);
+static HeapTuple BuildTupleFromBytes(AttInMetadata *attinmeta, fmStringInfo *values);
+static AttInMetadata * TupleDescGetAttBinaryInMetadata(TupleDesc tupdesc);
+static void SetAttributeInputMetadata(DistributedExecution *execution,
+									  ShardCommandExecution *shardCommandExecution);
 
 /*
  * AdaptiveExecutorPreExecutorRun gets called right before postgres starts its executor
@@ -1089,7 +1103,28 @@ CreateDistributedExecution(RowModifyLevel modLevel, List *taskList,
 	 * allocate for. We start with 16, and reallocate when we need more.
 	 */
 	execution->allocatedColumnCount = 16;
-	execution->columnArray = palloc0(execution->allocatedColumnCount * sizeof(char *));
+	execution->columnArray = palloc0(execution->allocatedColumnCount * sizeof(void *));
+	if (EnableBinaryProtocol)
+	{
+		/*
+		 * Initialize enough StringInfos for each column. These StringInfos
+		 * (and thus the backing buffers) will be reused for each row.
+		 * We will reference these StringInfos in the columnArray if the value
+		 * is not NULL.
+		 *
+		 * NOTE: StringInfos are always grown in the memory context in which
+		 * they were initially created. So appending in any memory context will
+		 * result in buffers that are still valid after removing that memory
+		 * context.
+		 */
+		execution->stringInfoDataArray = palloc0(
+			execution->allocatedColumnCount *
+			sizeof(StringInfoData));
+		for (int i = 0; i < execution->allocatedColumnCount; i++)
+		{
+			initStringInfo(&execution->stringInfoDataArray[i]);
+		}
+	}
 
 	if (ShouldExecuteTasksLocally(taskList))
 	{
@@ -1735,23 +1770,7 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
 											  sizeof(TaskPlacementExecution *));
 		shardCommandExecution->placementExecutionCount = placementExecutionCount;
 
-		TupleDestination *tupleDest = task->tupleDest ?
-									  task->tupleDest :
-									  execution->defaultTupleDest;
-		uint32 queryCount = task->queryCount;
-		shardCommandExecution->attributeInputMetadata = palloc0(queryCount *
-																sizeof(AttInMetadata *));
-
-		for (uint32 queryIndex = 0; queryIndex < queryCount; queryIndex++)
-		{
-			TupleDesc tupleDescriptor = tupleDest->tupleDescForQuery(tupleDest,
-																	 queryIndex);
-			shardCommandExecution->attributeInputMetadata[queryIndex] =
-				tupleDescriptor ?
-				TupleDescGetAttInMetadata(tupleDescriptor) :
-				NULL;
-		}
-
+		SetAttributeInputMetadata(execution, shardCommandExecution);
 		ShardPlacement *taskPlacement = NULL;
 		foreach_ptr(taskPlacement, task->taskPlacementList)
 		{
@@ -1895,6 +1914,53 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution)
 }
 
 
+/*
+ * SetAttributeInputMetadata sets attributeInputMetadata in
+ * shardCommandExecution for all the queries that are part of its task.
+ * This contains the deserialization functions for the tuples that will be
+ * received. It also sets binaryResults when applicable.
+ */
+static void
+SetAttributeInputMetadata(DistributedExecution *execution,
+						  ShardCommandExecution *shardCommandExecution)
+{
+	TupleDestination *tupleDest = shardCommandExecution->task->tupleDest ?
+								  shardCommandExecution->task->tupleDest :
+								  execution->defaultTupleDest;
+	uint32 queryCount = shardCommandExecution->task->queryCount;
+	shardCommandExecution->attributeInputMetadata = palloc0(queryCount *
+															sizeof(AttInMetadata *));
+
+	for (uint32 queryIndex = 0; queryIndex < queryCount; queryIndex++)
+	{
+		AttInMetadata *attInMetadata = NULL;
+		TupleDesc tupleDescriptor = tupleDest->tupleDescForQuery(tupleDest,
+																 queryIndex);
+		if (tupleDescriptor == NULL)
+		{
+			attInMetadata = NULL;
+		}
+
+		/*
+		 * We only allow binary results when queryCount is 1, because we
+		 * cannot use binary results with SendRemoteCommand, which must be
+		 * used if queryCount is larger than 1.
+		 */
+		else if (EnableBinaryProtocol && queryCount == 1 &&
+				 CanUseBinaryCopyFormat(tupleDescriptor))
+		{
+			attInMetadata = TupleDescGetAttBinaryInMetadata(tupleDescriptor);
+			shardCommandExecution->binaryResults = true;
+		}
+		else
+		{
+			attInMetadata = TupleDescGetAttInMetadata(tupleDescriptor);
+		}
+
+		shardCommandExecution->attributeInputMetadata[queryIndex] = attInMetadata;
+	}
+}
+
+
 /*
  * UseConnectionPerPlacement returns whether we should use a separate connection
  * per placement even if another connection is idle. We mostly use this in testing
@@ -3382,6 +3448,7 @@ StartPlacementExecutionOnSession(TaskPlacementExecution *placementExecution,
 	MultiConnection *connection = session->connection;
 	ShardCommandExecution *shardCommandExecution =
 		placementExecution->shardCommandExecution;
+	bool binaryResults = shardCommandExecution->binaryResults;
 	Task *task = shardCommandExecution->task;
 	ShardPlacement *taskPlacement = placementExecution->shardPlacement;
 	List *placementAccessList = PlacementAccessListForTask(task, taskPlacement);
@@ -3428,11 +3495,32 @@ StartPlacementExecutionOnSession(TaskPlacementExecution *placementExecution,
 		ExtractParametersForRemoteExecution(paramListInfo, &parameterTypes,
 											&parameterValues);
 		querySent = SendRemoteCommandParams(connection, queryString, parameterCount,
-											parameterTypes, parameterValues);
+											parameterTypes, parameterValues,
+											binaryResults);
 	}
 	else
 	{
-		querySent = SendRemoteCommand(connection, queryString);
+		/*
+		 * We only need to use SendRemoteCommandParams when we desire
+		 * binaryResults. One downside of SendRemoteCommandParams is that it
+		 * only supports a single query in the query string. In some cases we
+		 * have more than one query; in those cases we have already made sure
+		 * above that binaryResults is false.
+		 *
+		 * XXX: It also seems that SendRemoteCommandParams does something
+		 * strange/incorrect with SELECT statements: in
+		 * isolation_select_vs_all.spec, an s1-router-select in one session
+		 * blocked an s2-ddl-create-index-concurrently in another.
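+		 * The isolation test works around this by setting
+		 * citus.enable_binary_protocol to false before the s1-router-select
+		 * step.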
+ */ + if (!binaryResults) + { + querySent = SendRemoteCommand(connection, queryString); + } + else + { + querySent = SendRemoteCommandParams(connection, queryString, 0, NULL, NULL, + binaryResults); + } } if (querySent == 0) @@ -3478,11 +3566,11 @@ ReceiveResults(WorkerSession *session, bool storeRows) * into tuple. The context is reseted on every row, thus we create it at the * start of the loop and reset on every iteration. */ - MemoryContext ioContext = AllocSetContextCreate(CurrentMemoryContext, - "IoContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + MemoryContext rowContext = AllocSetContextCreate(CurrentMemoryContext, + "RowContext", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); while (!PQisBusy(connection->pgConn)) { @@ -3567,16 +3655,48 @@ ReceiveResults(WorkerSession *session, bool storeRows) if (columnCount > execution->allocatedColumnCount) { pfree(execution->columnArray); + int oldColumnCount = execution->allocatedColumnCount; execution->allocatedColumnCount = columnCount; execution->columnArray = palloc0(execution->allocatedColumnCount * - sizeof(char *)); + sizeof(void *)); + if (EnableBinaryProtocol) + { + /* + * Using repalloc here, to not throw away any previously + * created StringInfos. + */ + execution->stringInfoDataArray = repalloc( + execution->stringInfoDataArray, + execution->allocatedColumnCount * + sizeof(StringInfoData)); + for (int i = oldColumnCount; i < columnCount; i++) + { + initStringInfo(&execution->stringInfoDataArray[i]); + } + } } - char **columnArray = execution->columnArray; + void **columnArray = execution->columnArray; + StringInfoData *stringInfoDataArray = execution->stringInfoDataArray; + bool binaryResults = shardCommandExecution->binaryResults; + + /* + * stringInfoDataArray is NULL when EnableBinaryProtocol is false. So + * we make sure binaryResults is also false in that case. Otherwise we + * cannot store them anywhere. + */ + Assert(EnableBinaryProtocol || !binaryResults); for (uint32 rowIndex = 0; rowIndex < rowsProcessed; rowIndex++) { - memset(columnArray, 0, columnCount * sizeof(char *)); + /* + * Switch to a temporary memory context that we reset after each + * tuple. This protects us from any memory leaks that might be + * present in anything we do to parse a tuple. 
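+			 * This includes the type receive (or input) functions that are
+			 * called below while building the heap tuple.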
+ */ + MemoryContext oldContext = MemoryContextSwitchTo(rowContext); + + memset(columnArray, 0, columnCount * sizeof(void *)); for (columnIndex = 0; columnIndex < columnCount; columnIndex++) { @@ -3586,34 +3706,55 @@ ReceiveResults(WorkerSession *session, bool storeRows) } else { - columnArray[columnIndex] = PQgetvalue(result, rowIndex, columnIndex); + int valueLength = PQgetlength(result, rowIndex, columnIndex); + char *value = PQgetvalue(result, rowIndex, columnIndex); + if (binaryResults) + { + if (PQfformat(result, columnIndex) == 0) + { + ereport(ERROR, (errmsg("unexpected text result"))); + } + resetStringInfo(&stringInfoDataArray[columnIndex]); + appendBinaryStringInfo(&stringInfoDataArray[columnIndex], + value, valueLength); + columnArray[columnIndex] = &stringInfoDataArray[columnIndex]; + } + else + { + if (PQfformat(result, columnIndex) == 1) + { + ereport(ERROR, (errmsg("unexpected binary result"))); + } + columnArray[columnIndex] = value; + } if (SubPlanLevel > 0 && executionStats != NULL) { - executionStats->totalIntermediateResultSize += PQgetlength(result, - rowIndex, - columnIndex); + executionStats->totalIntermediateResultSize += valueLength; } } } - /* - * Switch to a temporary memory context that we reset after each tuple. This - * protects us from any memory leaks that might be present in I/O functions - * called by BuildTupleFromCStrings. - */ - MemoryContext oldContextPerRow = MemoryContextSwitchTo(ioContext); - AttInMetadata *attInMetadata = shardCommandExecution->attributeInputMetadata[queryIndex]; - HeapTuple heapTuple = BuildTupleFromCStrings(attInMetadata, columnArray); + HeapTuple heapTuple; + if (binaryResults) + { + heapTuple = BuildTupleFromBytes(attInMetadata, + (fmStringInfo *) columnArray); + } + else + { + heapTuple = BuildTupleFromCStrings(attInMetadata, + (char **) columnArray); + } - MemoryContextSwitchTo(oldContextPerRow); + MemoryContextSwitchTo(oldContext); tupleDest->putTuple(tupleDest, task, placementExecution->placementExecutionIndex, queryIndex, heapTuple); - MemoryContextReset(ioContext); + MemoryContextReset(rowContext); execution->rowsProcessed++; } @@ -3627,12 +3768,126 @@ ReceiveResults(WorkerSession *session, bool storeRows) } /* the context is local to the function, so not needed anymore */ - MemoryContextDelete(ioContext); + MemoryContextDelete(rowContext); return fetchDone; } +/* + * TupleDescGetAttBinaryInMetadata - Build an AttInMetadata structure based on + * the supplied TupleDesc. AttInMetadata can be used in conjunction with + * fmStringInfos containing binary encoded types to produce a properly formed + * tuple. + * + * NOTE: This function is a copy of the PG function TupleDescGetAttInMetadata, + * except that it uses getTypeBinaryInputInfo instead of getTypeInputInfo. 
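+ *
+ * Note that getTypeBinaryInputInfo errors out for types that have no binary
+ * input function, so callers need to make sure the tuple descriptor only
+ * contains types for which CanUseBinaryCopyFormat returns true.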
+ */ +static AttInMetadata * +TupleDescGetAttBinaryInMetadata(TupleDesc tupdesc) +{ + int natts = tupdesc->natts; + int i; + Oid atttypeid; + Oid attinfuncid; + + AttInMetadata *attinmeta = (AttInMetadata *) palloc(sizeof(AttInMetadata)); + + /* "Bless" the tupledesc so that we can make rowtype datums with it */ + attinmeta->tupdesc = BlessTupleDesc(tupdesc); + + /* + * Gather info needed later to call the "in" function for each attribute + */ + FmgrInfo *attinfuncinfo = (FmgrInfo *) palloc0(natts * sizeof(FmgrInfo)); + Oid *attioparams = (Oid *) palloc0(natts * sizeof(Oid)); + int32 *atttypmods = (int32 *) palloc0(natts * sizeof(int32)); + + for (i = 0; i < natts; i++) + { + Form_pg_attribute att = TupleDescAttr(tupdesc, i); + + /* Ignore dropped attributes */ + if (!att->attisdropped) + { + atttypeid = att->atttypid; + getTypeBinaryInputInfo(atttypeid, &attinfuncid, &attioparams[i]); + fmgr_info(attinfuncid, &attinfuncinfo[i]); + atttypmods[i] = att->atttypmod; + } + } + attinmeta->attinfuncs = attinfuncinfo; + attinmeta->attioparams = attioparams; + attinmeta->atttypmods = atttypmods; + + return attinmeta; +} + + +/* + * BuildTupleFromBytes - build a HeapTuple given user data in binary form. + * values is an array of StringInfos, one for each attribute of the return + * tuple. A NULL StringInfo pointer indicates we want to create a NULL field. + * + * NOTE: This function is a copy of the PG function BuildTupleFromCStrings, + * except that it uses ReceiveFunctionCall instead of InputFunctionCall. + */ +static HeapTuple +BuildTupleFromBytes(AttInMetadata *attinmeta, fmStringInfo *values) +{ + TupleDesc tupdesc = attinmeta->tupdesc; + int natts = tupdesc->natts; + int i; + + Datum *dvalues = (Datum *) palloc(natts * sizeof(Datum)); + bool *nulls = (bool *) palloc(natts * sizeof(bool)); + + /* + * Call the "in" function for each non-dropped attribute, even for nulls, + * to support domains. + */ + for (i = 0; i < natts; i++) + { + if (!TupleDescAttr(tupdesc, i)->attisdropped) + { + /* Non-dropped attributes */ + dvalues[i] = ReceiveFunctionCall(&attinmeta->attinfuncs[i], + values[i], + attinmeta->attioparams[i], + attinmeta->atttypmods[i]); + if (values[i] != NULL) + { + nulls[i] = false; + } + else + { + nulls[i] = true; + } + } + else + { + /* Handle dropped attributes by setting to NULL */ + dvalues[i] = (Datum) 0; + nulls[i] = true; + } + } + + /* + * Form a tuple + */ + HeapTuple tuple = heap_form_tuple(tupdesc, dvalues, nulls); + + /* + * Release locally palloc'd space. XXX would probably be good to pfree + * values of pass-by-reference datums, as well. + */ + pfree(dvalues); + pfree(nulls); + + return tuple; +} + + /* * WorkerPoolFailed marks a worker pool and all the placement executions scheduled * on it as failed. 
diff --git a/src/backend/distributed/executor/distributed_intermediate_results.c b/src/backend/distributed/executor/distributed_intermediate_results.c index adeac51c7..84d836919 100644 --- a/src/backend/distributed/executor/distributed_intermediate_results.c +++ b/src/backend/distributed/executor/distributed_intermediate_results.c @@ -217,7 +217,7 @@ WrapTasksForPartitioning(const char *resultIdPrefix, List *selectTaskList, { StringInfo wrappedQuery = makeStringInfo(); appendStringInfo(wrappedQuery, - "SELECT %u, partition_index" + "SELECT %u::int, partition_index" ", %s || '_' || partition_index::text " ", rows_written " "FROM worker_partition_query_result" @@ -334,7 +334,7 @@ ExecutePartitionTaskList(List *taskList, CitusTableCacheEntry *targetRelation) #endif TupleDescInitEntry(resultDescriptor, (AttrNumber) 1, "node_id", - INT8OID, -1, 0); + INT4OID, -1, 0); TupleDescInitEntry(resultDescriptor, (AttrNumber) 2, "partition_index", INT4OID, -1, 0); TupleDescInitEntry(resultDescriptor, (AttrNumber) 3, "result_id", diff --git a/src/backend/distributed/master/citus_create_restore_point.c b/src/backend/distributed/master/citus_create_restore_point.c index 78d6f9e32..4b3130b93 100644 --- a/src/backend/distributed/master/citus_create_restore_point.c +++ b/src/backend/distributed/master/citus_create_restore_point.c @@ -166,7 +166,7 @@ CreateRemoteRestorePoints(char *restoreName, List *connectionList) { int querySent = SendRemoteCommandParams(connection, CREATE_RESTORE_POINT_COMMAND, parameterCount, parameterTypes, - parameterValues); + parameterValues, false); if (querySent == 0) { ReportConnectionError(connection, ERROR); diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 22a5cab39..b353d21a6 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -543,6 +543,17 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.enable_binary_protocol", + gettext_noop( + "Enables communication between nodes using binary protocol when possible"), + NULL, + &EnableBinaryProtocol, + false, + PGC_USERSET, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.override_table_visibility", gettext_noop("Enables replacing occurencens of pg_catalog.pg_table_visible() " diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 2317c68b3..bd0db1d8e 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -504,7 +504,7 @@ SendCommandToWorkersParamsInternal(TargetWorkerSet targetWorkerSet, const char * foreach_ptr(connection, connectionList) { int querySent = SendRemoteCommandParams(connection, command, parameterCount, - parameterTypes, parameterValues); + parameterTypes, parameterValues, false); if (querySent == 0) { ReportConnectionError(connection, ERROR); diff --git a/src/include/distributed/adaptive_executor.h b/src/include/distributed/adaptive_executor.h index 55fe0726c..f12058284 100644 --- a/src/include/distributed/adaptive_executor.h +++ b/src/include/distributed/adaptive_executor.h @@ -6,6 +6,7 @@ /* GUC, determining whether Citus opens 1 connection per task */ extern bool ForceMaxQueryParallelization; extern int MaxAdaptiveExecutorPoolSize; +extern bool EnableBinaryProtocol; /* GUC, number of ms to wait between opening connections to the same worker */ extern int 
ExecutorSlowStartInterval; diff --git a/src/include/distributed/remote_commands.h b/src/include/distributed/remote_commands.h index 7c7a3a411..f7be3fc5a 100644 --- a/src/include/distributed/remote_commands.h +++ b/src/include/distributed/remote_commands.h @@ -48,7 +48,8 @@ extern int ExecuteOptionalRemoteCommand(MultiConnection *connection, extern int SendRemoteCommand(MultiConnection *connection, const char *command); extern int SendRemoteCommandParams(MultiConnection *connection, const char *command, int parameterCount, const Oid *parameterTypes, - const char *const *parameterValues); + const char *const *parameterValues, + bool binaryResults); extern List * ReadFirstColumnAsText(PGresult *queryResult); extern PGresult * GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts); diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index fe1783460..4d5a5f611 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -123,3 +123,6 @@ s/Citus.*currently supports/Citus currently supports/g # Warnings in multi_explain s/prepared transaction with identifier .* does not exist/prepared transaction with identifier "citus_x_yyyyyy_zzz_w" does not exist/g s/failed to roll back prepared transaction '.*'/failed to roll back prepared transaction 'citus_x_yyyyyy_zzz_w'/g + +# Errors with binary decoding where OIDs should be normalized +s/wrong data type: [0-9]+, expected [0-9]+/wrong data type: XXXX, expected XXXX/g diff --git a/src/test/regress/expected/binary_protocol.out b/src/test/regress/expected/binary_protocol.out new file mode 100644 index 000000000..e12de5f07 --- /dev/null +++ b/src/test/regress/expected/binary_protocol.out @@ -0,0 +1,169 @@ +SET citus.shard_count = 4; +SET citus.next_shard_id TO 4754000; +CREATE SCHEMA binary_protocol; +SET search_path TO binary_protocol; +SET citus.enable_binary_protocol = TRUE; +CREATE TABLE t(id int); +SELECT create_distributed_table('t', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO t (SELECT i FROM generate_series(1, 10) i); +SELECT * FROM t ORDER BY id; + id +--------------------------------------------------------------------- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +-- Select more than 16 columns to trigger growing of columns +SELECT id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id + FROM t ORDER BY id; + id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id | id +--------------------------------------------------------------------- + 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 + 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 + 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 + 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 + 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 
| 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 + 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 + 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 + 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 | 10 +(10 rows) + +INSERT INTO t SELECT count(*) from t; +INSERT INTO t (SELECT id+1 from t); +SELECT * FROM t ORDER BY id; + id +--------------------------------------------------------------------- + 1 + 2 + 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 10 + 11 + 11 +(22 rows) + +CREATE TYPE composite_type AS ( + i integer, + i2 integer +); +CREATE TABLE composite_type_table +( + id bigserial, + col composite_type[] +); +SELECT create_distributed_table('composite_type_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO composite_type_table(col) VALUES (ARRAY[(1, 2)::composite_type]); +SELECT * FROM composite_type_table; + id | col +--------------------------------------------------------------------- + 1 | {"(1,2)"} +(1 row) + +CREATE TYPE nested_composite_type AS ( + a composite_type, + b composite_type +); +CREATE TABLE nested_composite_type_table +( + id bigserial, + col nested_composite_type +); +SELECT create_distributed_table('nested_composite_type_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO nested_composite_type_table(col) VALUES (((1, 2), (3,4))::nested_composite_type); +SELECT * FROM nested_composite_type_table; + id | col +--------------------------------------------------------------------- + 1 | ("(1,2)","(3,4)") +(1 row) + +CREATE TABLE binaryless_builtin ( +col1 aclitem NOT NULL, +col2 character varying(255) NOT NULL +); +SELECT create_reference_table('binaryless_builtin'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO binaryless_builtin VALUES ('user postgres=r/postgres', 'test'); +SELECT * FROM binaryless_builtin; + col1 | col2 +--------------------------------------------------------------------- + postgres=r/postgres | test +(1 row) + +CREATE TABLE test_table_1(id int, val1 int); +CREATE TABLE test_table_2(id int, val1 bigint); +SELECT create_distributed_table('test_table_1', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('test_table_2', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO test_table_1 VALUES(1,1),(2,4),(3,3); +INSERT INTO test_table_2 VALUES(1,1),(3,3),(4,5); +SELECT id, val1 +FROM test_table_1 LEFT JOIN test_table_2 USING(id, val1) +ORDER BY 1, 2; + id | val1 +--------------------------------------------------------------------- + 1 | 1 + 2 | 4 + 3 | 3 +(3 rows) + +\set VERBOSITY terse +DROP SCHEMA binary_protocol CASCADE; +NOTICE: drop cascades to 8 other objects diff --git a/src/test/regress/expected/isolation_select_vs_all.out b/src/test/regress/expected/isolation_select_vs_all.out index b1136066b..3cd3b8dae 100644 --- a/src/test/regress/expected/isolation_select_vs_all.out +++ b/src/test/regress/expected/isolation_select_vs_all.out @@ -19,6 +19,9 @@ step 
s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -43,6 +46,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -55,8 +61,8 @@ id data int_data 1 b 1 step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -70,6 +76,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-router-select s1-commit s1-select-count create_distributed_table @@ -94,6 +103,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -122,6 +134,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -138,8 +153,8 @@ id data int_data 3 d 3 4 e 4 step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -153,6 +168,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-router-select s1-commit s1-select-count create_distributed_table @@ -161,8 +179,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -180,6 +198,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -188,8 +209,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = 
t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -211,6 +232,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -219,8 +243,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -230,8 +254,8 @@ id data int_data id data int_d 3 d 3 3 d 3 4 e 4 4 e 4 step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -245,6 +269,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-insert s1-commit s1-select-count create_distributed_table @@ -262,6 +289,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-insert-select s1-commit s1-select-count create_distributed_table @@ -279,6 +309,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-update s1-commit s1-select-count create_distributed_table @@ -296,6 +329,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-delete s1-commit s1-select-count create_distributed_table @@ -313,6 +349,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-truncate s1-commit s1-select-count create_distributed_table @@ -331,6 +370,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-drop s1-commit s1-select-count create_distributed_table @@ -347,6 +389,9 @@ step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -369,6 +414,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -393,13 +441,20 @@ run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) +restore_isolation_tester_func -starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes + + +starting permutation: s1-initialize s1-begin s1-disable-binary-protocol s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; +step s1-disable-binary-protocol: + -- Workaround router-select blocking blocking create-index-concurrently + SET citus.enable_binary_protocol TO false; + step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data @@ -415,6 +470,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -438,6 +496,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -462,6 +523,9 @@ run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -485,6 +549,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count create_distributed_table @@ -505,6 +572,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-router-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table @@ -522,6 +592,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table @@ -538,6 +611,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table @@ -554,6 +630,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-router-select s2-distribute-table s1-commit 
s1-select-count create_distributed_table @@ -574,6 +653,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-insert s2-router-select s1-commit s1-select-count create_distributed_table @@ -591,6 +673,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-insert-select s2-router-select s1-commit s1-select-count create_distributed_table @@ -608,6 +693,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-update s2-router-select s1-commit s1-select-count create_distributed_table @@ -625,6 +713,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-delete s2-router-select s1-commit s1-select-count create_distributed_table @@ -642,6 +733,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-truncate s2-router-select s1-commit s1-select-count create_distributed_table @@ -659,6 +753,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-drop s2-router-select s1-commit s1-select-count create_distributed_table @@ -673,6 +770,9 @@ step s2-router-select: <... completed> error in steps s1-commit s2-router-select: ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-router-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -695,6 +795,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -719,6 +822,9 @@ run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -742,6 +848,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -766,6 +875,9 @@ run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -789,6 +901,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count create_distributed_table @@ -809,6 +924,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards 
s2-router-select s1-commit s1-select-count create_distributed_table @@ -826,6 +944,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s1-commit s1-select-count create_distributed_table @@ -842,6 +963,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s1-commit s1-select-count create_distributed_table @@ -858,6 +982,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-router-select s1-commit s1-select-count create_distributed_table @@ -878,6 +1005,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert s1-commit s1-select-count create_distributed_table @@ -899,6 +1029,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert-select s1-commit s1-select-count create_distributed_table @@ -920,6 +1053,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-update s1-commit s1-select-count create_distributed_table @@ -941,6 +1077,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-delete s1-commit s1-select-count create_distributed_table @@ -962,6 +1101,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-truncate s1-commit s1-select-count create_distributed_table @@ -984,6 +1126,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-drop s1-commit s1-select-count create_distributed_table @@ -1004,6 +1149,9 @@ step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1030,6 +1178,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1058,6 +1209,9 @@ run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1084,6 +1238,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1111,6 +1268,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1139,6 +1299,9 @@ run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1166,6 +1329,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count create_distributed_table @@ -1190,6 +1356,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-real-time-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table @@ -1211,6 +1380,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-real-time-select s2-distribute-table s1-commit s1-select-count create_distributed_table @@ -1231,6 +1403,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-insert s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1252,6 +1427,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-insert-select s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1273,6 +1451,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-update s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1294,6 +1475,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-delete s2-real-time-select s1-commit 
s1-select-count create_distributed_table @@ -1315,6 +1499,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-truncate s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1332,6 +1519,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-drop s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1346,6 +1536,9 @@ step s2-real-time-select: <... completed> error in steps s1-commit s2-real-time-select: ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-real-time-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1372,6 +1565,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1400,6 +1596,9 @@ run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1427,6 +1626,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1455,6 +1657,9 @@ run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1482,6 +1687,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1506,6 +1714,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1527,6 +1738,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-real-time-select s1-commit s1-select-count create_distributed_table @@ -1547,6 +1761,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert s1-commit s1-select-count create_distributed_table @@ -1555,8 +1772,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 
JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1571,6 +1788,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert-select s1-commit s1-select-count create_distributed_table @@ -1579,8 +1799,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1595,6 +1815,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-update s1-commit s1-select-count create_distributed_table @@ -1603,8 +1826,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1619,6 +1842,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-delete s1-commit s1-select-count create_distributed_table @@ -1627,8 +1853,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1643,6 +1869,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-truncate s1-commit s1-select-count create_distributed_table @@ -1651,8 +1880,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data 
ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1668,6 +1897,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-drop s1-commit s1-select-count create_distributed_table @@ -1676,8 +1908,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1691,6 +1923,9 @@ step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1699,8 +1934,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1720,6 +1955,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-task-tracker-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1729,8 +1967,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1751,6 +1989,9 @@ run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -1759,8 +2000,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 
4; id data int_data id data int_data @@ -1780,6 +2021,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1788,8 +2032,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1810,6 +2054,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-task-tracker-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1819,8 +2066,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data new_column id data int_data new_column @@ -1841,6 +2088,9 @@ run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table @@ -1849,8 +2099,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1871,6 +2121,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-table-size s1-commit s1-select-count create_distributed_table @@ -1879,8 +2132,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1898,6 +2151,9 @@ 
step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table @@ -1906,8 +2162,8 @@ create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1922,6 +2178,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-task-tracker-select s2-distribute-table s1-commit s1-select-count create_distributed_table @@ -1931,8 +2190,8 @@ step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1945,6 +2204,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-insert s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -1954,8 +2216,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1969,6 +2231,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-insert-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -1978,8 +2243,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -1993,6 +2258,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-update s2-task-tracker-select s1-commit s1-select-count 
create_distributed_table @@ -2002,8 +2270,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -2017,6 +2285,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-delete s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -2026,8 +2297,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -2041,6 +2312,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-truncate s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -2050,8 +2324,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> @@ -2061,6 +2335,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-drop s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -2070,14 +2347,17 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> error in steps s1-commit s2-task-tracker-select: ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -2087,8 +2367,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -2107,6 +2387,9 @@ run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes create_distributed_table @@ -2117,8 +2400,8 @@ step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> @@ -2138,6 +2421,9 @@ run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -2147,8 +2433,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> @@ -2168,6 +2454,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -2178,8 +2467,8 @@ step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> @@ -2199,6 +2488,9 @@ run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table @@ -2208,8 +2500,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> @@ -2229,6 +2521,9 @@ run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-table-size s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -2241,8 +2536,8 @@ citus_total_relation_size 32768 step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -2256,6 +2551,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 +restore_isolation_tester_func + + starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -2265,8 +2563,8 @@ step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, step s1-begin: BEGIN; step s1-master-modify-multiple-shards: DELETE FROM select_append; step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -2280,6 +2578,9 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-task-tracker-select s1-commit s1-select-count create_distributed_table @@ -2293,8 +2594,8 @@ create_distributed_table step s2-task-tracker-select: - SET citus.task_executor_type TO "task-tracker"; - SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; + SET citus.task_executor_type TO "task-tracker"; + SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data @@ -2303,3 +2604,6 @@ step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 +restore_isolation_tester_func + + diff --git a/src/test/regress/expected/isolation_select_vs_all_on_mx.out b/src/test/regress/expected/isolation_select_vs_all_on_mx.out index 5c5ce570f..9c4480617 100644 --- a/src/test/regress/expected/isolation_select_vs_all_on_mx.out +++ b/src/test/regress/expected/isolation_select_vs_all_on_mx.out @@ -387,7 +387,7 @@ restore_isolation_tester_func -starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection +starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-disable-binary-protocol-on-worker s1-select s2-coordinator-create-index-concurrently s1-commit-worker s1-stop-connection step s1-start-session-level-connection: SELECT start_session_level_connection_to_node('localhost', 57637); @@ -400,6 +400,13 @@ step s1-begin-on-worker: run_commands_on_session_level_connection_to_node +step s1-disable-binary-protocol-on-worker: + -- Workaround for router-select blocking create-index-concurrently + SELECT run_commands_on_session_level_connection_to_node('SET citus.enable_binary_protocol TO false'); + 
+run_commands_on_session_level_connection_to_node + + step s1-select: SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index 0d20d7cdb..9702e9779 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -232,6 +232,17 @@ DEBUG: pushing down the function call (S,S) (1 row) +-- This currently fails undetected when using the binary protocol. +-- The protocol should not be enabled by default until this is resolved. The +-- tests above will also fail when the default is changed to TRUE. +SET citus.enable_binary_protocol = TRUE; +select mx_call_func_custom_types('S', 'A'); +DEBUG: pushing down the function call +ERROR: wrong data type: XXXX, expected XXXX +select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); +DEBUG: pushing down the function call +ERROR: wrong data type: XXXX, expected XXXX +RESET citus.enable_binary_protocol; -- We don't allow distributing calls inside transactions begin; select mx_call_func(2, 0); diff --git a/src/test/regress/spec/isolation_select_vs_all.spec b/src/test/regress/spec/isolation_select_vs_all.spec index 49f0740c0..20a66c634 100644 --- a/src/test/regress/spec/isolation_select_vs_all.spec +++ b/src/test/regress/spec/isolation_select_vs_all.spec @@ -5,6 +5,9 @@ // create range distributed table to test behavior of SELECT in concurrent operations setup { + SELECT citus_internal.replace_isolation_tester_func(); + SELECT citus_internal.refresh_isolation_tester_prepared_statement(); + SET citus.shard_replication_factor TO 1; CREATE TABLE select_append(id integer, data text, int_data int); SELECT create_distributed_table('select_append', 'id', 'append'); @@ -14,12 +17,18 @@ setup teardown { DROP TABLE IF EXISTS select_append CASCADE; + SELECT citus_internal.restore_isolation_tester_func(); } // session 1 session "s1" step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0 && echo 1, b, 1 && echo 2, c, 2 && echo 3, d, 3 && echo 4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } + +step "s1-disable-binary-protocol" { + -- Workaround for router-select blocking create-index-concurrently + SET citus.enable_binary_protocol TO false; +} step "s1-router-select" { SELECT * FROM select_append WHERE id = 1; } step "s1-real-time-select" { SELECT * FROM select_append ORDER BY 1, 2; } step "s1-task-tracker-select" @@ -96,7 +105,7 @@ permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-truncate" "s1-comm permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-router-select" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" -permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" +permutation "s1-initialize" "s1-begin" "s1-disable-binary-protocol" "s1-router-select" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" 
"s1-router-select" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" diff --git a/src/test/regress/spec/isolation_select_vs_all_on_mx.spec b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec index b5ae28371..c567d579f 100644 --- a/src/test/regress/spec/isolation_select_vs_all_on_mx.spec +++ b/src/test/regress/spec/isolation_select_vs_all_on_mx.spec @@ -29,6 +29,11 @@ step "s1-begin-on-worker" SELECT run_commands_on_session_level_connection_to_node('BEGIN'); } +step "s1-disable-binary-protocol-on-worker" { + -- Workaround router-select blocking blocking create-index-concurrently + SELECT run_commands_on_session_level_connection_to_node('SET citus.enable_binary_protocol TO false'); +} + step "s1-select" { SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM select_table'); @@ -135,4 +140,4 @@ permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-copy" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" "s3-select-count" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" "s2-begin" "s2-index" "s1-commit-worker" "s2-commit" "s1-stop-connection" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" "s2-start-session-level-connection" "s2-begin-on-worker" "s2-select-for-update" "s1-commit-worker" "s2-commit-worker" "s1-stop-connection" "s2-stop-connection" -permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-select" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" +permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-disable-binary-protocol-on-worker" "s1-select" "s2-coordinator-create-index-concurrently" "s1-commit-worker" "s1-stop-connection" diff --git a/src/test/regress/sql/binary_protocol.sql b/src/test/regress/sql/binary_protocol.sql new file mode 100644 index 000000000..1c4fd858e --- /dev/null +++ b/src/test/regress/sql/binary_protocol.sql @@ -0,0 +1,84 @@ +SET citus.shard_count = 4; +SET citus.next_shard_id TO 4754000; +CREATE SCHEMA binary_protocol; +SET search_path TO binary_protocol; +SET citus.enable_binary_protocol = TRUE; + +CREATE TABLE t(id int); +SELECT create_distributed_table('t', 'id'); + +INSERT INTO t (SELECT i FROM generate_series(1, 10) i); + +SELECT * FROM t ORDER BY id; +-- Select more than 16 columns to trigger growing of columns +SELECT id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id, + id, id, id, id, id + FROM t ORDER BY id; + +INSERT INTO t SELECT count(*) from t; + +INSERT INTO t (SELECT id+1 from t); + +SELECT * FROM t ORDER BY id; + +CREATE TYPE composite_type AS ( + i integer, + i2 integer +); + +CREATE TABLE composite_type_table +( + id bigserial, + col composite_type[] +); + + +SELECT create_distributed_table('composite_type_table', 'id'); +INSERT INTO composite_type_table(col) VALUES (ARRAY[(1, 2)::composite_type]); + +SELECT * FROM composite_type_table; + +CREATE TYPE nested_composite_type AS ( + a composite_type, + b composite_type +); + +CREATE TABLE nested_composite_type_table +( + id bigserial, + col nested_composite_type +); +SELECT create_distributed_table('nested_composite_type_table', 'id'); + 
+INSERT INTO nested_composite_type_table(col) VALUES (((1, 2), (3,4))::nested_composite_type); + +SELECT * FROM nested_composite_type_table; + + +CREATE TABLE binaryless_builtin ( +col1 aclitem NOT NULL, +col2 character varying(255) NOT NULL +); +SELECT create_reference_table('binaryless_builtin'); + +INSERT INTO binaryless_builtin VALUES ('user postgres=r/postgres', 'test'); +SELECT * FROM binaryless_builtin; + +CREATE TABLE test_table_1(id int, val1 int); +CREATE TABLE test_table_2(id int, val1 bigint); +SELECT create_distributed_table('test_table_1', 'id'); +SELECT create_distributed_table('test_table_2', 'id'); +INSERT INTO test_table_1 VALUES(1,1),(2,4),(3,3); +INSERT INTO test_table_2 VALUES(1,1),(3,3),(4,5); + +SELECT id, val1 +FROM test_table_1 LEFT JOIN test_table_2 USING(id, val1) +ORDER BY 1, 2; + +\set VERBOSITY terse +DROP SCHEMA binary_protocol CASCADE; + diff --git a/src/test/regress/sql/multi_mx_function_call_delegation.sql b/src/test/regress/sql/multi_mx_function_call_delegation.sql index c153087fd..45ed820ba 100644 --- a/src/test/regress/sql/multi_mx_function_call_delegation.sql +++ b/src/test/regress/sql/multi_mx_function_call_delegation.sql @@ -104,6 +104,16 @@ select squares(4); select multi_mx_function_call_delegation.mx_call_func(2, 0); select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); + +-- This currently fails undetected when using the binary protocol. +-- The protocol should not be enabled by default until this is resolved. The +-- tests above will also fail when the default is changed to TRUE. +SET citus.enable_binary_protocol = TRUE; +select mx_call_func_custom_types('S', 'A'); +select multi_mx_function_call_delegation.mx_call_func_custom_types('S', 'A'); +RESET citus.enable_binary_protocol; + + -- We don't allow distributing calls inside transactions begin; select mx_call_func(2, 0);
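For reviewers who want to try the feature by hand, a minimal sketch of the opt-in flow follows. The table, data, and session settings are illustrative only and not part of this patch; the GUC name and the fallback behavior for types without send/receive functions are taken from the changes above.

    -- Opt in per session; the GUC stays off by default because of the
    -- custom-type delegation failure exercised in the tests above.
    SET citus.enable_binary_protocol = TRUE;

    -- Hypothetical table, used only for this illustration.
    CREATE TABLE wide_rows(id int, payload text);
    SELECT create_distributed_table('wide_rows', 'id');
    INSERT INTO wide_rows
    SELECT i, repeat('x', 100) FROM generate_series(1, 1000) i;

    -- Results from the workers now arrive in binary format, which can
    -- shrink the data sent between nodes. Types that lack binary send or
    -- receive functions (e.g. aclitem) fall back to the text format.
    SELECT count(*) FROM wide_rows;

    RESET citus.enable_binary_protocol;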