diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ae8d8c625..9ca4ef6c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -131,8 +131,6 @@ cd build cmake .. make -j5 sudo make install -# Optionally, you might instead want to use `sudo make install-all` -# since `multi_extension` regression test would fail due to missing downgrade scripts. cd ../.. git clone https://github.com/citusdata/tools.git diff --git a/ci/README.md b/ci/README.md index eef0fd4de..31fdb7e0e 100644 --- a/ci/README.md +++ b/ci/README.md @@ -156,9 +156,9 @@ git merge "community/$PR_BRANCH" familiar with the change. 5. You should rerun the `check-merge-to-enterprise` check on `community/$PR_BRANCH`. You can use re-run from failed option in circle CI. -6. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge", +6. You can now merge the PR on community. Be sure to NOT use "squash and merge", but instead use the regular "merge commit" mode. -7. You can now merge the PR on community. Be sure to NOT use "squash and merge", +7. You can now merge the PR on enterprise. Be sure to NOT use "squash and merge", but instead use the regular "merge commit" mode. The subsequent PRs on community will be able to pass the diff --git a/ci/check_enterprise_merge.sh b/ci/check_enterprise_merge.sh index 040a5c224..d29ffcad8 100755 --- a/ci/check_enterprise_merge.sh +++ b/ci/check_enterprise_merge.sh @@ -66,10 +66,10 @@ fi git merge --abort # If we have a conflict on enterprise merge on the master branch, we have a problem. -# Provide an error message to indicate that enterprise merge is needed. +# Provide an error message to indicate that enterprise merge is needed to fix this check. if [[ $PR_BRANCH = master ]]; then - echo "ERROR: Master branch has merge conlicts with enterprise-master." - echo "Try re-running this job if you merged community PR before enterprise PR. Otherwise conflicts need to be resolved as a separate PR on enterprise." 
+ echo "ERROR: Master branch has merge conflicts with enterprise-master." + echo "Try re-running this CI job after merging your changes into enterprise-master." exit 1 fi diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index 8183feddc..916962b4a 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -24,10 +24,12 @@ #include "catalog/pg_am.h" #include "catalog/pg_publication.h" #include "catalog/pg_trigger.h" +#include "catalog/pg_extension.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "commands/progress.h" #include "commands/vacuum.h" +#include "commands/extension.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "optimizer/plancat.h" @@ -154,6 +156,20 @@ static void ColumnarReadMissingRowsIntoIndex(TableScanDesc scan, Relation indexR static ItemPointerData TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetItemPointer); +/* functions for CheckCitusColumnarVersion */ +static bool CheckAvailableVersionColumnar(int elevel); +static bool CheckInstalledVersionColumnar(int elevel); +static char * AvailableExtensionVersionColumnar(void); +static char * InstalledExtensionVersionColumnar(void); +static bool CitusColumnarHasBeenLoadedInternal(void); +static bool CitusColumnarHasBeenLoaded(void); +static bool CheckCitusColumnarVersion(int elevel); +static bool MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion); + +/* global variables for CheckCitusColumnarVersion */ +static bool extensionLoadedColumnar = false; +static bool EnableVersionChecksColumnar = true; +static bool citusVersionKnownCompatibleColumnar = false; /* Custom tuple slot ops used for columnar. Initialized in columnar_tableam_init(). 
*/ static TupleTableSlotOps TTSOpsColumnar; @@ -171,7 +187,7 @@ columnar_beginscan(Relation relation, Snapshot snapshot, ParallelTableScanDesc parallel_scan, uint32 flags) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); int natts = relation->rd_att->natts; @@ -194,6 +210,7 @@ columnar_beginscan_extended(Relation relation, Snapshot snapshot, ParallelTableScanDesc parallel_scan, uint32 flags, Bitmapset *attr_needed, List *scanQual) { + CheckCitusColumnarVersion(ERROR); Oid relfilenode = relation->rd_node.relNode; /* @@ -418,7 +435,7 @@ columnar_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan) static IndexFetchTableData * columnar_index_fetch_begin(Relation rel) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); Oid relfilenode = rel->rd_node.relNode; if (PendingWritesInUpperTransactions(relfilenode, GetCurrentSubTransactionId())) @@ -643,7 +660,7 @@ static bool columnar_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, Snapshot snapshot) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); uint64 rowNumber = tid_to_row_number(slot->tts_tid); StripeMetadata *stripeMetadata = FindStripeByRowNumber(rel, rowNumber, snapshot); @@ -656,7 +673,7 @@ static TransactionId columnar_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); /* * XXX: We didn't bother implementing index_delete_tuple for neither of @@ -717,7 +734,7 @@ static void columnar_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); /* * columnar_init_write_state allocates the write state in a longer @@ -765,7 +782,7 @@ static void columnar_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); ColumnarWriteState 
*writeState = columnar_init_write_state(relation, RelationGetDescr(relation), @@ -841,7 +858,7 @@ columnar_relation_set_new_filenode(Relation rel, TransactionId *freezeXid, MultiXactId *minmulti) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); if (persistence == RELPERSISTENCE_UNLOGGED) { @@ -878,8 +895,7 @@ columnar_relation_set_new_filenode(Relation rel, static void columnar_relation_nontransactional_truncate(Relation rel) { - CheckCitusVersion(ERROR); - + CheckCitusColumnarVersion(ERROR); RelFileNode relfilenode = rel->rd_node; NonTransactionDropWriteState(relfilenode.relNode); @@ -926,7 +942,7 @@ columnar_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, double *tups_vacuumed, double *tups_recently_dead) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); TupleDesc sourceDesc = RelationGetDescr(OldHeap); TupleDesc targetDesc = RelationGetDescr(NewHeap); @@ -1024,7 +1040,7 @@ static void columnar_vacuum_rel(Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy) { - if (!CheckCitusVersion(WARNING)) + if (!CheckCitusColumnarVersion(WARNING)) { /* * Skip if the extension catalogs are not up-to-date, but avoid @@ -1342,7 +1358,7 @@ columnar_index_build_range_scan(Relation columnarRelation, void *callback_state, TableScanDesc scan) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); if (start_blockno != 0 || numblocks != InvalidBlockNumber) { @@ -1592,7 +1608,7 @@ columnar_index_validate_scan(Relation columnarRelation, ValidateIndexState * validateIndexState) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); ColumnarReportTotalVirtualBlocks(columnarRelation, snapshot, PROGRESS_SCAN_BLOCKS_TOTAL); @@ -1764,7 +1780,7 @@ TupleSortSkipSmallerItemPointers(Tuplesortstate *tupleSort, ItemPointer targetIt static uint64 columnar_relation_size(Relation rel, ForkNumber forkNumber) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); uint64 nblocks = 0; @@ -1791,7 +1807,7 @@ 
columnar_relation_size(Relation rel, ForkNumber forkNumber) static bool columnar_relation_needs_toast_table(Relation rel) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); return false; } @@ -1802,8 +1818,7 @@ columnar_estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac) { - CheckCitusVersion(ERROR); - + CheckCitusColumnarVersion(ERROR); RelationOpenSmgr(rel); *pages = smgrnblocks(rel->rd_smgr, MAIN_FORKNUM); *tuples = ColumnarTableRowCount(rel); @@ -1910,6 +1925,15 @@ columnar_tableam_init() TTSOpsColumnar = TTSOpsVirtual; TTSOpsColumnar.copy_heap_tuple = ColumnarSlotCopyHeapTuple; + DefineCustomBoolVariable( + "columnar.enable_version_checks", + gettext_noop("Enables Version Check for Columnar"), + NULL, + &EnableVersionChecksColumnar, + true, + PGC_USERSET, + GUC_NO_SHOW_ALL, + NULL, NULL, NULL); } @@ -1968,7 +1992,7 @@ ColumnarTableDropHook(Oid relid) if (IsColumnarTableAmTable(relid)) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); /* * Drop metadata. 
No need to drop storage here since for @@ -2093,8 +2117,7 @@ ColumnarProcessUtility(PlannedStmt *pstmt, if (rel->rd_tableam == GetColumnarTableAmRoutine()) { - CheckCitusVersion(ERROR); - + CheckCitusColumnarVersion(ERROR); if (!ColumnarSupportsIndexAM(indexStmt->accessMethod)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -2316,7 +2339,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_set); Datum alter_columnar_table_set(PG_FUNCTION_ARGS) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); Oid relationId = PG_GETARG_OID(0); @@ -2440,7 +2463,7 @@ PG_FUNCTION_INFO_V1(alter_columnar_table_reset); Datum alter_columnar_table_reset(PG_FUNCTION_ARGS) { - CheckCitusVersion(ERROR); + CheckCitusColumnarVersion(ERROR); Oid relationId = PG_GETARG_OID(0); @@ -2579,3 +2602,318 @@ downgrade_columnar_storage(PG_FUNCTION_ARGS) table_close(rel, AccessExclusiveLock); PG_RETURN_VOID(); } + + +/* + * Code to check the Citus Version, helps remove dependency from Citus + */ + +/* + * CitusColumnarHasBeenLoaded returns true if the citus extension has been created + * in the current database and the extension script has been executed. Otherwise, + * it returns false. The result is cached as this is called very frequently. + */ +bool +CitusColumnarHasBeenLoaded(void) +{ + if (!extensionLoadedColumnar || creating_extension) + { + /* + * Refresh if we have not determined whether the extension has been + * loaded yet, or in case of ALTER EXTENSION since we want to treat + * Citus as "not loaded" during ALTER EXTENSION citus. + */ + bool extensionLoaded = CitusColumnarHasBeenLoadedInternal(); + extensionLoadedColumnar = extensionLoaded; + } + + return extensionLoadedColumnar; +} + + +/* + * CitusColumnarHasBeenLoadedInternal returns true if the citus extension has been created + * in the current database and the extension script has been executed. Otherwise, + * it returns false. 
+ */ +static bool +CitusColumnarHasBeenLoadedInternal(void) +{ + if (IsBinaryUpgrade) + { + /* never use Citus logic during pg_upgrade */ + return false; + } + + Oid citusExtensionOid = get_extension_oid("citus", true); + if (citusExtensionOid == InvalidOid) + { + /* Citus extension does not exist yet */ + return false; + } + + if (creating_extension && CurrentExtensionObject == citusExtensionOid) + { + /* + * We do not use Citus hooks during CREATE/ALTER EXTENSION citus + * since the objects used by the C code might be not be there yet. + */ + return false; + } + + /* citus extension exists and has been created */ + return true; +} + + +/* + * CheckCitusColumnarVersion checks whether there is a version mismatch between the + * available version and the loaded version or between the installed version + * and the loaded version. Returns true if compatible, false otherwise. + * + * As a side effect, this function also sets citusVersionKnownCompatible_Columnar global + * variable to true which reduces version check cost of next calls. + */ +bool +CheckCitusColumnarVersion(int elevel) +{ + if (citusVersionKnownCompatibleColumnar || + !CitusColumnarHasBeenLoaded() || + !EnableVersionChecksColumnar) + { + return true; + } + + if (CheckAvailableVersionColumnar(elevel) && CheckInstalledVersionColumnar(elevel)) + { + citusVersionKnownCompatibleColumnar = true; + return true; + } + else + { + return false; + } +} + + +/* + * CheckAvailableVersion compares CITUS_EXTENSIONVERSION and the currently + * available version from the citus.control file. If they are not compatible, + * this function logs an error with the specified elevel and returns false, + * otherwise it returns true. 
+ */ +bool +CheckAvailableVersionColumnar(int elevel) +{ + if (!EnableVersionChecksColumnar) + { + return true; + } + + char *availableVersion = AvailableExtensionVersionColumnar(); + + if (!MajorVersionsCompatibleColumnar(availableVersion, CITUS_EXTENSIONVERSION)) + { + ereport(elevel, (errmsg("loaded Citus library version differs from latest " + "available extension version"), + errdetail("Loaded library requires %s, but the latest control " + "file specifies %s.", CITUS_MAJORVERSION, + availableVersion), + errhint("Restart the database to load the latest Citus " + "library."))); + pfree(availableVersion); + return false; + } + pfree(availableVersion); + return true; +} + + +/* + * CheckInstalledVersion compares CITUS_EXTENSIONVERSION and the + * extension's current version from the pg_extension catalog table. If they + * are not compatible, this function logs an error with the specified elevel, + * otherwise it returns true. + */ +static bool +CheckInstalledVersionColumnar(int elevel) +{ + Assert(CitusColumnarHasBeenLoaded()); + Assert(EnableVersionChecksColumnar); + + char *installedVersion = InstalledExtensionVersionColumnar(); + + if (!MajorVersionsCompatibleColumnar(installedVersion, CITUS_EXTENSIONVERSION)) + { + ereport(elevel, (errmsg("loaded Citus library version differs from installed " + "extension version"), + errdetail("Loaded library requires %s, but the installed " + "extension version is %s.", CITUS_MAJORVERSION, + installedVersion), + errhint("Run ALTER EXTENSION citus UPDATE and try again."))); + pfree(installedVersion); + return false; + } + pfree(installedVersion); + return true; +} + + +/* + * MajorVersionsCompatible checks whether both versions are compatible. They + * are if major and minor version numbers match, the schema version is + * ignored. Returns true if compatible, false otherwise. 
+ */ +bool +MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion) +{ + const char schemaVersionSeparator = '-'; + + char *leftSeperatorPosition = strchr(leftVersion, schemaVersionSeparator); + char *rightSeperatorPosition = strchr(rightVersion, schemaVersionSeparator); + int leftComparisionLimit = 0; + int rightComparisionLimit = 0; + + if (leftSeperatorPosition != NULL) + { + leftComparisionLimit = leftSeperatorPosition - leftVersion; + } + else + { + leftComparisionLimit = strlen(leftVersion); + } + + if (rightSeperatorPosition != NULL) + { + rightComparisionLimit = rightSeperatorPosition - rightVersion; + } + else + { + rightComparisionLimit = strlen(rightVersion); + } + + /* we can error out early if hyphens are not in the same position */ + if (leftComparisionLimit != rightComparisionLimit) + { + return false; + } + + return strncmp(leftVersion, rightVersion, leftComparisionLimit) == 0; +} + + +/* + * AvailableExtensionVersion returns the Citus version from citus.control file. It also + * saves the result, thus consecutive calls to CitusExtensionAvailableVersion will + * not read the citus.control file again. 
+ */ +static char * +AvailableExtensionVersionColumnar(void) +{ + LOCAL_FCINFO(fcinfo, 0); + FmgrInfo flinfo; + + bool goForward = true; + bool doCopy = false; + char *availableExtensionVersion; + + EState *estate = CreateExecutorState(); + ReturnSetInfo *extensionsResultSet = makeNode(ReturnSetInfo); + extensionsResultSet->econtext = GetPerTupleExprContext(estate); + extensionsResultSet->allowedModes = SFRM_Materialize; + + fmgr_info(F_PG_AVAILABLE_EXTENSIONS, &flinfo); + InitFunctionCallInfoData(*fcinfo, &flinfo, 0, InvalidOid, NULL, + (Node *) extensionsResultSet); + + /* pg_available_extensions returns result set containing all available extensions */ + (*pg_available_extensions)(fcinfo); + + TupleTableSlot *tupleTableSlot = MakeSingleTupleTableSlotCompat( + extensionsResultSet->setDesc, + &TTSOpsMinimalTuple); + bool hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, + doCopy, + tupleTableSlot); + while (hasTuple) + { + bool isNull = false; + + Datum extensionNameDatum = slot_getattr(tupleTableSlot, 1, &isNull); + char *extensionName = NameStr(*DatumGetName(extensionNameDatum)); + if (strcmp(extensionName, "citus") == 0) + { + Datum availableVersion = slot_getattr(tupleTableSlot, 2, &isNull); + + + availableExtensionVersion = text_to_cstring(DatumGetTextPP(availableVersion)); + + + ExecClearTuple(tupleTableSlot); + ExecDropSingleTupleTableSlot(tupleTableSlot); + + return availableExtensionVersion; + } + + ExecClearTuple(tupleTableSlot); + hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, + doCopy, tupleTableSlot); + } + + ExecDropSingleTupleTableSlot(tupleTableSlot); + + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("citus extension is not found"))); +} + + +/* + * InstalledExtensionVersion returns the Citus version in PostgreSQL pg_extension table. 
+ */ +static char * +InstalledExtensionVersionColumnar(void) +{ + ScanKeyData entry[1]; + char *installedExtensionVersion = NULL; + + Relation relation = table_open(ExtensionRelationId, AccessShareLock); + + ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ, + CStringGetDatum("citus")); + + SysScanDesc scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, + NULL, 1, entry); + + HeapTuple extensionTuple = systable_getnext(scandesc); + + /* We assume that there can be at most one matching tuple */ + if (HeapTupleIsValid(extensionTuple)) + { + int extensionIndex = Anum_pg_extension_extversion; + TupleDesc tupleDescriptor = RelationGetDescr(relation); + bool isNull = false; + + Datum installedVersion = heap_getattr(extensionTuple, extensionIndex, + tupleDescriptor, &isNull); + + if (isNull) + { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("citus extension version is null"))); + } + + + installedExtensionVersion = text_to_cstring(DatumGetTextPP(installedVersion)); + } + else + { + ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("citus extension is not loaded"))); + } + + systable_endscan(scandesc); + + table_close(relation, AccessShareLock); + + return installedExtensionVersion; +} diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 5364d49cc..6664b6c1c 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -1059,7 +1059,8 @@ CreateTableConversion(TableConversionParameters *params) } relation_close(relation, NoLock); con->distributionKey = - BuildDistributionKeyFromColumnName(relation, con->distributionColumn); + BuildDistributionKeyFromColumnName(con->relationId, con->distributionColumn, + NoLock); con->originalAccessMethod = NULL; if (!PartitionedTable(con->relationId) && !IsForeignTable(con->relationId)) @@ -1175,6 +1176,9 @@ 
CreateDistributedTableLike(TableConversionState *con) newShardCount = con->shardCount; } + char *distributionColumnName = + ColumnToColumnName(con->newRelationId, (Node *) newDistributionKey); + Oid originalRelationId = con->relationId; if (con->originalDistributionKey != NULL && PartitionTable(originalRelationId)) { @@ -1190,16 +1194,13 @@ CreateDistributedTableLike(TableConversionState *con) */ Oid parentRelationId = PartitionParentOid(originalRelationId); Var *parentDistKey = DistPartitionKeyOrError(parentRelationId); - char *parentDistKeyColumnName = - ColumnToColumnName(parentRelationId, nodeToString(parentDistKey)); - - newDistributionKey = - FindColumnWithNameOnTargetRelation(parentRelationId, parentDistKeyColumnName, - con->newRelationId); + distributionColumnName = + ColumnToColumnName(parentRelationId, (Node *) parentDistKey); } char partitionMethod = PartitionMethod(con->relationId); - CreateDistributedTable(con->newRelationId, newDistributionKey, partitionMethod, + + CreateDistributedTable(con->newRelationId, distributionColumnName, partitionMethod, newShardCount, true, newColocateWith, false); } diff --git a/src/backend/distributed/commands/citus_global_signal.c b/src/backend/distributed/commands/citus_global_signal.c new file mode 100644 index 000000000..fc7618159 --- /dev/null +++ b/src/backend/distributed/commands/citus_global_signal.c @@ -0,0 +1,127 @@ +/*------------------------------------------------------------------------- + * + * citus_global_signal.c + * Commands for Citus' overriden versions of pg_cancel_backend + * and pg_terminate_backend statements. + * + * Copyright (c) Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "distributed/pg_version_constants.h" + +#include "distributed/backend_data.h" +#include "distributed/metadata_cache.h" +#include "distributed/worker_manager.h" +#include "lib/stringinfo.h" +#include "signal.h" + +static bool CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig); + +PG_FUNCTION_INFO_V1(pg_cancel_backend); +PG_FUNCTION_INFO_V1(pg_terminate_backend); + +/* + * pg_cancel_backend overrides the Postgres' pg_cancel_backend to cancel + * a query with a global pid so a query can be cancelled from another node. + * + * To cancel a query that is on another node, a pg_cancel_backend command is sent + * to that node. This new command is sent with pid instead of global pid, so original + * pg_cancel_backend function is used. + */ +Datum +pg_cancel_backend(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + uint64 pid = PG_GETARG_INT64(0); + + int sig = SIGINT; + uint64 timeout = 0; + bool success = CitusSignalBackend(pid, timeout, sig); + + PG_RETURN_BOOL(success); +} + + +/* + * pg_terminate_backend overrides the Postgres' pg_terminate_backend to terminate + * a query with a global pid so a query can be terminated from another node. + * + * To terminate a query that is on another node, a pg_terminate_backend command is sent + * to that node. This new command is sent with pid instead of global pid, so original + * pg_terminate_backend function is used. 
+ */ +Datum +pg_terminate_backend(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + uint64 pid = PG_GETARG_INT64(0); + uint64 timeout = PG_GETARG_INT64(1); + + int sig = SIGTERM; + bool success = CitusSignalBackend(pid, timeout, sig); + + PG_RETURN_BOOL(success); +} + + +/* + * CitusSignalBackend gets a global pid and and ends the original query with the global pid + * that might have started in another node by connecting to that node and running either + * pg_cancel_backend or pg_terminate_backend based on the withTerminate argument. + */ +static bool +CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig) +{ + Assert((sig == SIGINT) || (sig == SIGTERM)); + +#if PG_VERSION_NUM < PG_VERSION_14 + if (timeout != 0) + { + elog(ERROR, "timeout parameter is only supported on Postgres 14 or later"); + } +#endif + + int nodeId = ExtractNodeIdFromGlobalPID(globalPID); + int processId = ExtractProcessIdFromGlobalPID(globalPID); + + WorkerNode *workerNode = FindNodeWithNodeId(nodeId); + + StringInfo cancelQuery = makeStringInfo(); + + if (sig == SIGINT) + { + appendStringInfo(cancelQuery, "SELECT pg_cancel_backend(%d::integer)", processId); + } + else + { +#if PG_VERSION_NUM >= PG_VERSION_14 + appendStringInfo(cancelQuery, + "SELECT pg_terminate_backend(%d::integer, %lu::bigint)", + processId, timeout); +#else + appendStringInfo(cancelQuery, "SELECT pg_terminate_backend(%d::integer)", + processId); +#endif + } + + StringInfo queryResult = makeStringInfo(); + + bool reportResultError = true; + + bool success = ExecuteRemoteQueryOrCommand(workerNode->workerName, + workerNode->workerPort, cancelQuery->data, + queryResult, reportResultError); + + if (success && queryResult && strcmp(queryResult->data, "f") == 0) + { + success = false; + } + + return success; +} diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index d712c18ab..7f047ec1d 100644 --- a/src/backend/distributed/commands/collation.c +++ 
b/src/backend/distributed/commands/collation.c @@ -37,7 +37,7 @@ static char * CreateCollationDDLInternal(Oid collationId, Oid *collowner, char **quotedCollationName); static List * FilterNameListForDistributedCollations(List *objects, bool missing_ok, List **addresses); - +static bool ShouldPropagateDefineCollationStmt(void); /* * GetCreateCollationDDLInternal returns a CREATE COLLATE sql string for the @@ -519,6 +519,26 @@ DefineCollationStmtObjectAddress(Node *node, bool missing_ok) } +/* + * PreprocessDefineCollationStmt executed before the collation has been + * created locally to ensure that if the collation create statement will + * be propagated, the node is a coordinator node + */ +List * +PreprocessDefineCollationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION); + + if (ShouldPropagateDefineCollationStmt()) + { + EnsureCoordinator(); + } + + return NIL; +} + + /* * PostprocessDefineCollationStmt executed after the collation has been * created locally and before we create it on the worker nodes. 
@@ -531,16 +551,7 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString) { Assert(castNode(DefineStmt, node)->kind == OBJECT_COLLATION); - if (!ShouldPropagate()) - { - return NIL; - } - - /* - * If the create collation command is a part of a multi-statement transaction, - * do not propagate it - */ - if (IsMultiStatementTransaction()) + if (!ShouldPropagateDefineCollationStmt()) { return NIL; } @@ -548,13 +559,38 @@ PostprocessDefineCollationStmt(Node *node, const char *queryString) ObjectAddress collationAddress = DefineCollationStmtObjectAddress(node, false); - if (IsObjectDistributed(&collationAddress)) - { - EnsureCoordinator(); - } - EnsureDependenciesExistOnAllNodes(&collationAddress); - return NodeDDLTaskList(NON_COORDINATOR_NODES, CreateCollationDDLsIdempotent( + /* to prevent recursion with mx we disable ddl propagation */ + List *commands = list_make1(DISABLE_DDL_PROPAGATION); + commands = list_concat(commands, CreateCollationDDLsIdempotent( collationAddress.objectId)); + commands = lappend(commands, ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * ShouldPropagateDefineCollationStmt checks if collation define + * statement should be propagated. 
Don't propagate if: + * - metadata syncing if off + * - statement is part of a multi stmt transaction and the multi shard connection + * type is not sequential + */ +static bool +ShouldPropagateDefineCollationStmt() +{ + if (!ShouldPropagate()) + { + return false; + } + + if (IsMultiStatementTransaction() && + MultiShardConnectionType != SEQUENTIAL_CONNECTION) + { + return false; + } + + return true; } diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index eb51195bf..5b7005354 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -159,30 +159,14 @@ master_create_distributed_table(PG_FUNCTION_ARGS) char *colocateWithTableName = NULL; bool viaDeprecatedAPI = true; - /* - * Lock target relation with an exclusive lock - there's no way to make - * sense of this table until we've committed, and we don't want multiple - * backends manipulating this relation. 
- */ - Relation relation = try_relation_open(relationId, ExclusiveLock); - - if (relation == NULL) - { - ereport(ERROR, (errmsg("could not create distributed table: " - "relation does not exist"))); - } - char *distributionColumnName = text_to_cstring(distributionColumnText); - Var *distributionColumn = BuildDistributionKeyFromColumnName(relation, - distributionColumnName); - Assert(distributionColumn != NULL); + Assert(distributionColumnName != NULL); + char distributionMethod = LookupDistributionMethod(distributionMethodOid); - CreateDistributedTable(relationId, distributionColumn, distributionMethod, + CreateDistributedTable(relationId, distributionColumnName, distributionMethod, ShardCount, false, colocateWithTableName, viaDeprecatedAPI); - relation_close(relation, NoLock); - PG_RETURN_VOID(); } @@ -249,9 +233,8 @@ create_distributed_table(PG_FUNCTION_ARGS) relation_close(relation, NoLock); char *distributionColumnName = text_to_cstring(distributionColumnText); - Var *distributionColumn = BuildDistributionKeyFromColumnName(relation, - distributionColumnName); - Assert(distributionColumn != NULL); + Assert(distributionColumnName != NULL); + char distributionMethod = LookupDistributionMethod(distributionMethodOid); if (shardCount < 1 || shardCount > MAX_SHARD_COUNT) @@ -261,7 +244,7 @@ create_distributed_table(PG_FUNCTION_ARGS) shardCount, MAX_SHARD_COUNT))); } - CreateDistributedTable(relationId, distributionColumn, distributionMethod, + CreateDistributedTable(relationId, distributionColumnName, distributionMethod, shardCount, shardCountIsStrict, colocateWithTableName, viaDeprecatedAPI); @@ -281,7 +264,7 @@ create_reference_table(PG_FUNCTION_ARGS) Oid relationId = PG_GETARG_OID(0); char *colocateWithTableName = NULL; - Var *distributionColumn = NULL; + char *distributionColumnName = NULL; bool viaDeprecatedAPI = false; @@ -317,7 +300,7 @@ create_reference_table(PG_FUNCTION_ARGS) errdetail("There are no active worker nodes."))); } - 
CreateDistributedTable(relationId, distributionColumn, DISTRIBUTE_BY_NONE, + CreateDistributedTable(relationId, distributionColumnName, DISTRIBUTE_BY_NONE, ShardCount, false, colocateWithTableName, viaDeprecatedAPI); PG_RETURN_VOID(); } @@ -385,9 +368,10 @@ EnsureRelationExists(Oid relationId) * day, once we deprecate master_create_distribute_table completely. */ void -CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributionMethod, - int shardCount, bool shardCountIsStrict, - char *colocateWithTableName, bool viaDeprecatedAPI) +CreateDistributedTable(Oid relationId, char *distributionColumnName, + char distributionMethod, int shardCount, + bool shardCountIsStrict, char *colocateWithTableName, + bool viaDeprecatedAPI) { /* * EnsureTableNotDistributed errors out when relation is a citus table but @@ -443,6 +427,8 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio DropFKeysRelationInvolvedWithTableType(relationId, INCLUDE_LOCAL_TABLES); } + LockRelationOid(relationId, ExclusiveLock); + /* * Ensure that the sequences used in column defaults of the table * have proper types @@ -463,22 +449,9 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio colocateWithTableName, viaDeprecatedAPI); - /* - * Due to dropping columns, the parent's distribution key may not match the - * partition's distribution key. The input distributionColumn belongs to - * the parent. That's why we override the distribution column of partitions - * here. See issue #5123 for details. 
- */ - if (PartitionTable(relationId)) - { - Oid parentRelationId = PartitionParentOid(relationId); - char *distributionColumnName = - ColumnToColumnName(parentRelationId, nodeToString(distributionColumn)); - - distributionColumn = - FindColumnWithNameOnTargetRelation(parentRelationId, distributionColumnName, - relationId); - } + Var *distributionColumn = BuildDistributionKeyFromColumnName(relationId, + distributionColumnName, + ExclusiveLock); /* * ColocationIdForNewTable assumes caller acquires lock on relationId. In our case, @@ -567,7 +540,7 @@ CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributio foreach_oid(partitionRelationId, partitionList) { - CreateDistributedTable(partitionRelationId, distributionColumn, + CreateDistributedTable(partitionRelationId, distributionColumnName, distributionMethod, shardCount, false, parentRelationName, viaDeprecatedAPI); } diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 9ee50ed47..84e875602 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -241,6 +241,17 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return NIL; } + /* + * Indices are created separately, however, they do show up in the dependency + * list for a table since they will have potentially their own dependencies. + * The commands will be added to both shards and metadata tables via the table + * creation commands. 
+ */ + if (relKind == RELKIND_INDEX) + { + return NIL; + } + if (relKind == RELKIND_RELATION || relKind == RELKIND_PARTITIONED_TABLE || relKind == RELKIND_FOREIGN_TABLE) { @@ -317,6 +328,11 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) return DDLCommands; } + case OCLASS_TSCONFIG: + { + return CreateTextSearchConfigDDLCommandsIdempotent(dependency); + } + case OCLASS_TYPE: { return CreateTypeDDLCommandsIdempotent(dependency); diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index 380a83401..37758aec3 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -175,7 +175,7 @@ static DistributeObjectOps Any_CreateFunction = { .preprocess = PreprocessCreateFunctionStmt, .postprocess = PostprocessCreateFunctionStmt, .address = CreateFunctionStmtObjectAddress, - .markDistributed = false, + .markDistributed = true, }; static DistributeObjectOps Any_CreatePolicy = { .deparse = NULL, @@ -276,7 +276,7 @@ static DistributeObjectOps Collation_AlterOwner = { static DistributeObjectOps Collation_Define = { .deparse = NULL, .qualify = NULL, - .preprocess = NULL, + .preprocess = PreprocessDefineCollationStmt, .postprocess = PostprocessDefineCollationStmt, .address = DefineCollationStmtObjectAddress, .markDistributed = true, @@ -505,6 +505,62 @@ static DistributeObjectOps Sequence_Rename = { .address = RenameSequenceStmtObjectAddress, .markDistributed = false, }; +static DistributeObjectOps TextSearchConfig_Alter = { + .deparse = DeparseAlterTextSearchConfigurationStmt, + .qualify = QualifyAlterTextSearchConfigurationStmt, + .preprocess = PreprocessAlterTextSearchConfigurationStmt, + .postprocess = NULL, + .address = AlterTextSearchConfigurationStmtObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_AlterObjectSchema = { + .deparse = 
DeparseAlterTextSearchConfigurationSchemaStmt, + .qualify = QualifyAlterTextSearchConfigurationSchemaStmt, + .preprocess = PreprocessAlterTextSearchConfigurationSchemaStmt, + .postprocess = PostprocessAlterTextSearchConfigurationSchemaStmt, + .address = AlterTextSearchConfigurationSchemaStmtObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_AlterOwner = { + .deparse = DeparseAlterTextSearchConfigurationOwnerStmt, + .qualify = QualifyAlterTextSearchConfigurationOwnerStmt, + .preprocess = PreprocessAlterTextSearchConfigurationOwnerStmt, + .postprocess = PostprocessAlterTextSearchConfigurationOwnerStmt, + .address = AlterTextSearchConfigurationOwnerObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_Comment = { + .deparse = DeparseTextSearchConfigurationCommentStmt, + .qualify = QualifyTextSearchConfigurationCommentStmt, + .preprocess = PreprocessTextSearchConfigurationCommentStmt, + .postprocess = NULL, + .address = TextSearchConfigurationCommentObjectAddress, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_Define = { + .deparse = DeparseCreateTextSearchStmt, + .qualify = NULL, + .preprocess = NULL, + .postprocess = PostprocessCreateTextSearchConfigurationStmt, + .address = CreateTextSearchConfigurationObjectAddress, + .markDistributed = true, +}; +static DistributeObjectOps TextSearchConfig_Drop = { + .deparse = DeparseDropTextSearchConfigurationStmt, + .qualify = QualifyDropTextSearchConfigurationStmt, + .preprocess = PreprocessDropTextSearchConfigurationStmt, + .postprocess = NULL, + .address = NULL, + .markDistributed = false, +}; +static DistributeObjectOps TextSearchConfig_Rename = { + .deparse = DeparseRenameTextSearchConfigurationStmt, + .qualify = QualifyRenameTextSearchConfigurationStmt, + .preprocess = PreprocessRenameTextSearchConfigurationStmt, + .postprocess = NULL, + .address = RenameTextSearchConfigurationStmtObjectAddress, + .markDistributed 
= false, +}; static DistributeObjectOps Trigger_AlterObjectDepends = { .deparse = NULL, .qualify = NULL, @@ -811,6 +867,11 @@ GetDistributeObjectOps(Node *node) return &Table_AlterObjectSchema; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_AlterObjectSchema; + } + case OBJECT_TYPE: { return &Type_AlterObjectSchema; @@ -868,6 +929,11 @@ GetDistributeObjectOps(Node *node) return &Statistics_AlterOwner; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_AlterOwner; + } + case OBJECT_TYPE: { return &Type_AlterOwner; @@ -949,11 +1015,33 @@ GetDistributeObjectOps(Node *node) return &Any_AlterTableMoveAll; } + case T_AlterTSConfigurationStmt: + { + return &TextSearchConfig_Alter; + } + case T_ClusterStmt: { return &Any_Cluster; } + case T_CommentStmt: + { + CommentStmt *stmt = castNode(CommentStmt, node); + switch (stmt->objtype) + { + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Comment; + } + + default: + { + return &NoDistributeOps; + } + } + } + case T_CompositeTypeStmt: { return &Any_CompositeType; @@ -1014,6 +1102,11 @@ GetDistributeObjectOps(Node *node) return &Collation_Define; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Define; + } + default: { return &NoDistributeOps; @@ -1091,6 +1184,11 @@ GetDistributeObjectOps(Node *node) return &Table_Drop; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Drop; + } + case OBJECT_TYPE: { return &Type_Drop; @@ -1190,6 +1288,11 @@ GetDistributeObjectOps(Node *node) return &Statistics_Rename; } + case OBJECT_TSCONFIGURATION: + { + return &TextSearchConfig_Rename; + } + case OBJECT_TYPE: { return &Type_Rename; diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 95b11fd0c..80b870b62 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -25,6 +25,7 @@ #include "access/htup_details.h" #include "access/xact.h" #include "catalog/pg_aggregate.h" 
+#include "catalog/dependency.h" #include "catalog/namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" @@ -38,6 +39,7 @@ #include "distributed/listutils.h" #include "distributed/maintenanced.h" #include "distributed/metadata_utility.h" +#include "distributed/metadata/dependency.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata/pg_dist_object.h" @@ -80,6 +82,7 @@ static void EnsureFunctionCanBeColocatedWithTable(Oid functionOid, Oid static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt); static bool ShouldPropagateAlterFunction(const ObjectAddress *address); static bool ShouldAddFunctionSignature(FunctionParameterMode mode); +static ObjectAddress * GetUndistributableDependency(ObjectAddress *functionAddress); static ObjectAddress FunctionToObjectAddress(ObjectType objectType, ObjectWithArgs *objectWithArgs, bool missing_ok); @@ -759,7 +762,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, /* * GetFunctionDDLCommand returns the complete "CREATE OR REPLACE FUNCTION ..." statement for - * the specified function followed by "ALTER FUNCTION .. SET OWNER ..". + * the specified function. * * useCreateOrReplace is ignored for non-aggregate functions. */ @@ -1170,46 +1173,23 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) /* * ShouldPropagateCreateFunction tests if we need to propagate a CREATE FUNCTION - * statement. We only propagate replace's of distributed functions to keep the function on - * the workers in sync with the one on the coordinator. + * statement. */ static bool ShouldPropagateCreateFunction(CreateFunctionStmt *stmt) { - if (creating_extension) + if (!ShouldPropagate()) { - /* - * extensions should be created separately on the workers, functions cascading - * from an extension should therefore not be propagated. 
- */ - return false; - } - - if (!EnableMetadataSync) - { - /* - * we are configured to disable object propagation, should not propagate anything - */ - return false; - } - - if (!stmt->replace) - { - /* - * Since we only care for a replace of distributed functions if the statement is - * not a replace we are going to ignore. - */ return false; } /* - * Even though its a replace we should accept an non-existing function, it will just - * not be distributed + * If the create command is a part of a multi-statement transaction that is not in + * sequential mode, don't propagate. */ - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, true); - if (!IsObjectDistributed(&address)) + if (IsMultiStatementTransaction() && + MultiShardConnectionType != SEQUENTIAL_CONNECTION) { - /* do not propagate alter function for non-distributed functions */ return false; } @@ -1253,12 +1233,10 @@ ShouldPropagateAlterFunction(const ObjectAddress *address) /* * PreprocessCreateFunctionStmt is called during the planning phase for CREATE [OR REPLACE] - * FUNCTION. We primarily care for the replace variant of this statement to keep - * distributed functions in sync. We bail via a check on ShouldPropagateCreateFunction - * which checks for the OR REPLACE modifier. + * FUNCTION before it is created on the local node internally. * * Since we use pg_get_functiondef to get the ddl command we actually do not do any - * planning here, instead we defer the plan creation to the processing step. + * planning here, instead we defer the plan creation to the postprocessing step. * * Instead we do our basic housekeeping where we make sure we are on the coordinator and * can propagate the function in sequential mode. 
@@ -1279,7 +1257,7 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString, EnsureSequentialMode(OBJECT_FUNCTION); /* - * ddl jobs will be generated during the Processing phase as we need the function to + * ddl jobs will be generated during the postprocessing phase as we need the function to * be updated in the catalog to get its sql representation */ return NIL; @@ -1290,6 +1268,11 @@ PreprocessCreateFunctionStmt(Node *node, const char *queryString, * PostprocessCreateFunctionStmt actually creates the plan we need to execute for function * propagation. This is the downside of using pg_get_functiondef to get the sql statement. * + * If function depends on any non-distributed relation (except sequence and composite type), + * Citus can not distribute it. In order to not to prevent users from creating local + * functions on the coordinator WARNING message will be sent to the customer about the case + * instead of erroring out. + * * Besides creating the plan we also make sure all (new) dependencies of the function are * created on all nodes. */ @@ -1303,18 +1286,113 @@ PostprocessCreateFunctionStmt(Node *node, const char *queryString) return NIL; } - ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); - EnsureDependenciesExistOnAllNodes(&address); + ObjectAddress functionAddress = GetObjectAddressFromParseTree((Node *) stmt, false); - List *commands = list_make4(DISABLE_DDL_PROPAGATION, - GetFunctionDDLCommand(address.objectId, true), - GetFunctionAlterOwnerCommand(address.objectId), - ENABLE_DDL_PROPAGATION); + if (IsObjectAddressOwnedByExtension(&functionAddress, NULL)) + { + return NIL; + } + + /* + * This check should have been valid for all objects not only for functions. Though, + * we do this limited check for now as functions are more likely to be used with + * such dependencies, and we want to scope it for now. 
+ */ + ObjectAddress *undistributableDependency = GetUndistributableDependency( + &functionAddress); + if (undistributableDependency != NULL) + { + if (SupportedDependencyByCitus(undistributableDependency)) + { + /* + * Citus can't distribute some relations as dependency, although those + * types as supported by Citus. So we can use get_rel_name directly + */ + RangeVar *functionRangeVar = makeRangeVarFromNameList(stmt->funcname); + char *functionName = functionRangeVar->relname; + char *dependentRelationName = + get_rel_name(undistributableDependency->objectId); + + ereport(WARNING, (errmsg("Citus can't distribute function \"%s\" having " + "dependency on non-distributed relation \"%s\"", + functionName, dependentRelationName), + errdetail("Function will be created only locally"), + errhint("To distribute function, distribute dependent " + "relations first. Then, re-create the function"))); + } + else + { + char *objectType = NULL; + #if PG_VERSION_NUM >= PG_VERSION_14 + objectType = getObjectTypeDescription(undistributableDependency, false); + #else + objectType = getObjectTypeDescription(undistributableDependency); + #endif + ereport(WARNING, (errmsg("Citus can't distribute functions having " + "dependency on unsupported object of type \"%s\"", + objectType), + errdetail("Function will be created only locally"))); + } + + return NIL; + } + + EnsureDependenciesExistOnAllNodes(&functionAddress); + + List *commands = list_make1(DISABLE_DDL_PROPAGATION); + commands = list_concat(commands, CreateFunctionDDLCommandsIdempotent( + &functionAddress)); + commands = list_concat(commands, list_make1(ENABLE_DDL_PROPAGATION)); return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } +/* + * GetUndistributableDependency checks whether object has any non-distributable + * dependency. If any one found, it will be returned. 
+ */ +static ObjectAddress * +GetUndistributableDependency(ObjectAddress *objectAddress) +{ + List *dependencies = GetAllDependenciesForObject(objectAddress); + ObjectAddress *dependency = NULL; + foreach_ptr(dependency, dependencies) + { + if (IsObjectDistributed(dependency)) + { + continue; + } + + if (!SupportedDependencyByCitus(dependency)) + { + /* + * Since roles should be handled manually with Citus community, skip them. + */ + if (getObjectClass(dependency) != OCLASS_ROLE) + { + return dependency; + } + } + + if (getObjectClass(dependency) == OCLASS_CLASS) + { + /* + * Citus can only distribute dependent non-distributed sequence + * and composite types. + */ + char relKind = get_rel_relkind(dependency->objectId); + if (relKind != RELKIND_SEQUENCE && relKind != RELKIND_COMPOSITE_TYPE) + { + return dependency; + } + } + } + + return NULL; +} + + /* * CreateFunctionStmtObjectAddress returns the ObjectAddress for the subject of the * CREATE [OR REPLACE] FUNCTION statement. If missing_ok is false it will error with the diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index cfdd6ad63..5ff984f66 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -725,12 +725,6 @@ PostprocessIndexStmt(Node *node, const char *queryString) { IndexStmt *indexStmt = castNode(IndexStmt, node); - /* we are only processing CONCURRENT index statements */ - if (!indexStmt->concurrent) - { - return NIL; - } - /* this logic only applies to the coordinator */ if (!IsCoordinator()) { @@ -747,14 +741,36 @@ PostprocessIndexStmt(Node *node, const char *queryString) return NIL; } + Oid indexRelationId = get_relname_relid(indexStmt->idxname, schemaId); + + /* ensure dependencies of index exist on all nodes */ + ObjectAddress address = { 0 }; + ObjectAddressSet(address, RelationRelationId, indexRelationId); + EnsureDependenciesExistOnAllNodes(&address); + + /* furtheron we are only processing 
CONCURRENT index statements */ + if (!indexStmt->concurrent) + { + return NIL; + } + + /* + * EnsureDependenciesExistOnAllNodes could have distributed objects that are required + * by this index. During the propagation process an active snapshout might be left as + * a side effect of inserting the local tuples via SPI. To not leak a snapshot like + * that we will pop any snapshot if we have any right before we commit. + */ + if (ActiveSnapshotSet()) + { + PopActiveSnapshot(); + } + /* commit the current transaction and start anew */ CommitTransactionCommand(); StartTransactionCommand(); /* get the affected relation and index */ Relation relation = table_openrv(indexStmt->relation, ShareUpdateExclusiveLock); - Oid indexRelationId = get_relname_relid(indexStmt->idxname, - schemaId); Relation indexRelation = index_open(indexRelationId, RowExclusiveLock); /* close relations but retain locks */ diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index e0c0f2b11..77f3fcc32 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -86,13 +86,6 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString, DropStmt *dropStatement = castNode(DropStmt, node); Assert(dropStatement->removeType == OBJECT_SCHEMA); - if (!ShouldPropagate()) - { - return NIL; - } - - EnsureCoordinator(); - List *distributedSchemas = FilterDistributedSchemas(dropStatement->objects); if (list_length(distributedSchemas) < 1) @@ -100,6 +93,13 @@ PreprocessDropSchemaStmt(Node *node, const char *queryString, return NIL; } + if (!ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_SCHEMA); Value *schemaVal = NULL; diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 9236f0e1a..5718e675a 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -378,6 +378,8 @@ 
PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const } Var *parentDistributionColumn = DistPartitionKeyOrError(parentRelationId); + char *distributionColumnName = + ColumnToColumnName(parentRelationId, (Node *) parentDistributionColumn); char parentDistributionMethod = DISTRIBUTE_BY_HASH; char *parentRelationName = generate_qualified_relation_name(parentRelationId); bool viaDeprecatedAPI = false; @@ -385,7 +387,7 @@ PostprocessCreateTableStmtPartitionOf(CreateStmt *createStatement, const SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong(parentRelationId, relationId); - CreateDistributedTable(relationId, parentDistributionColumn, + CreateDistributedTable(relationId, distributionColumnName, parentDistributionMethod, ShardCount, false, parentRelationName, viaDeprecatedAPI); } @@ -573,13 +575,8 @@ static void DistributePartitionUsingParent(Oid parentCitusRelationId, Oid partitionRelationId) { Var *distributionColumn = DistPartitionKeyOrError(parentCitusRelationId); - char *distributionColumnName = - ColumnToColumnName(parentCitusRelationId, - nodeToString(distributionColumn)); - distributionColumn = - FindColumnWithNameOnTargetRelation(parentCitusRelationId, - distributionColumnName, - partitionRelationId); + char *distributionColumnName = ColumnToColumnName(parentCitusRelationId, + (Node *) distributionColumn); char distributionMethod = DISTRIBUTE_BY_HASH; char *parentRelationName = generate_qualified_relation_name(parentCitusRelationId); @@ -588,7 +585,7 @@ DistributePartitionUsingParent(Oid parentCitusRelationId, Oid partitionRelationI SwitchToSequentialAndLocalExecutionIfPartitionNameTooLong( parentCitusRelationId, partitionRelationId); - CreateDistributedTable(partitionRelationId, distributionColumn, + CreateDistributedTable(partitionRelationId, distributionColumnName, distributionMethod, ShardCount, false, parentRelationName, viaDeprecatedAPI); } diff --git a/src/backend/distributed/commands/text_search.c 
b/src/backend/distributed/commands/text_search.c new file mode 100644 index 000000000..be78057f7 --- /dev/null +++ b/src/backend/distributed/commands/text_search.c @@ -0,0 +1,935 @@ +/*------------------------------------------------------------------------- + * + * text_search.c + * Commands for creating and altering TEXT SEARCH objects + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/genam.h" +#include "access/xact.h" +#include "catalog/namespace.h" +#include "catalog/objectaddress.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_config_map.h" +#include "catalog/pg_ts_dict.h" +#include "catalog/pg_ts_parser.h" +#include "commands/comment.h" +#include "commands/extension.h" +#include "fmgr.h" +#include "nodes/makefuncs.h" +#include "tsearch/ts_cache.h" +#include "tsearch/ts_public.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" +#include "distributed/multi_executor.h" +#include "distributed/relation_access_tracking.h" +#include "distributed/worker_create_or_replace.h" + + +static List * GetDistributedTextSearchConfigurationNames(DropStmt *stmt); +static DefineStmt * GetTextSearchConfigDefineStmt(Oid tsconfigOid); +static List * GetTextSearchConfigCommentStmt(Oid tsconfigOid); +static List * get_ts_parser_namelist(Oid tsparserOid); +static List * GetTextSearchConfigMappingStmt(Oid tsconfigOid); +static List * GetTextSearchConfigOwnerStmts(Oid tsconfigOid); + +static List * get_ts_dict_namelist(Oid tsdictOid); +static Oid get_ts_config_parser_oid(Oid tsconfigOid); +static char * get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype); + +/* + * 
PostprocessCreateTextSearchConfigurationStmt is called after the TEXT SEARCH + * CONFIGURATION has been created locally. + * + * Contrary to many other objects a text search configuration is often created as a copy + * of an existing configuration. After the copy there is no relation to the configuration + * that has been copied. This prevents our normal approach of ensuring dependencies to + * exist before forwarding a close resemblance of the statement the user executed. + * + * Instead we recreate the object based on what we find in our own catalog, hence the + * amount of work we perform in the postprocess function, contrary to other objects. + */ +List * +PostprocessCreateTextSearchConfigurationStmt(Node *node, const char *queryString) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + Assert(stmt->kind == OBJECT_TSCONFIGURATION); + + if (!ShouldPropagate()) + { + return NIL; + } + + /* + * If the create command is a part of a multi-statement transaction that is not in + * sequential mode, don't propagate. Instead we will rely on back filling.
+ */ + if (IsMultiStatementTransaction()) + { + if (MultiShardConnectionType != SEQUENTIAL_CONNECTION) + { + return NIL; + } + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + EnsureDependenciesExistOnAllNodes(&address); + + /* + * TEXT SEARCH CONFIGURATION objects are more complex with their mappings and the + * possibility of copying from existing templates that we will require the idempotent + * recreation commands to be run for successful propagation + */ + List *commands = CreateTextSearchConfigDDLCommandsIdempotent(&address); + + commands = lcons(DISABLE_DDL_PROPAGATION, commands); + commands = lappend(commands, ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +List * +GetCreateTextSearchConfigStatements(const ObjectAddress *address) +{ + Assert(address->classId == TSConfigRelationId); + List *stmts = NIL; + + /* CREATE TEXT SEARCH CONFIGURATION ...*/ + stmts = lappend(stmts, GetTextSearchConfigDefineStmt(address->objectId)); + + /* ALTER TEXT SEARCH CONFIGURATION ... OWNER TO ...*/ + stmts = list_concat(stmts, GetTextSearchConfigOwnerStmts(address->objectId)); + + /* COMMENT ON TEXT SEARCH CONFIGURATION ... */ + stmts = list_concat(stmts, GetTextSearchConfigCommentStmt(address->objectId)); + + + /* ALTER TEXT SEARCH CONFIGURATION ... ADD MAPPING FOR ... WITH ... */ + stmts = list_concat(stmts, GetTextSearchConfigMappingStmt(address->objectId)); + + return stmts; +} + + +/* + * CreateTextSearchConfigDDLCommandsIdempotent creates a list of ddl commands to recreate + * a TEXT SEARCH CONFIGURATION object in an idempotent manner on workers.
+ */ +List * +CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address) +{ + List *stmts = GetCreateTextSearchConfigStatements(address); + List *sqls = DeparseTreeNodes(stmts); + return list_make1(WrapCreateOrReplaceList(sqls)); +} + + +/* + * PreprocessDropTextSearchConfigurationStmt prepares the statements we need to send to + * the workers. After we have dropped the configurations locally they also got removed from + * pg_dist_object so it is important to do all distribution checks before the change is + * made locally. + */ +List * +PreprocessDropTextSearchConfigurationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSCONFIGURATION); + + if (!ShouldPropagate()) + { + return NIL; + } + + List *distributedObjects = GetDistributedTextSearchConfigurationNames(stmt); + if (list_length(distributedObjects) == 0) + { + /* no distributed objects to remove */ + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + /* + * Temporarily replace the list of objects being dropped with only the list + * containing the distributed objects. After we have created the sql statement we + * restore the original list of objects to execute on locally. + * + * Because searchpaths on coordinator and workers might not be in sync we fully + * qualify the list before deparsing. This is safe because qualification doesn't + * change the original names in place, but instead creates new ones.
+ */ + List *originalObjects = stmt->objects; + stmt->objects = distributedObjects; + QualifyTreeNode((Node *) stmt); + const char *dropStmtSql = DeparseTreeNode((Node *) stmt); + stmt->objects = originalObjects; + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * GetDistributedTextSearchConfigurationNames iterates over all text search configurations + * dropped, and creates a list containing all configurations that are distributed. + */ +static List * +GetDistributedTextSearchConfigurationNames(DropStmt *stmt) +{ + List *objName = NULL; + List *distributedObjects = NIL; + foreach_ptr(objName, stmt->objects) + { + Oid tsconfigOid = get_ts_config_oid(objName, stmt->missing_ok); + if (!OidIsValid(tsconfigOid)) + { + /* skip missing configuration names, they can't be distributed */ + continue; + } + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, tsconfigOid); + if (!IsObjectDistributed(&address)) + { + continue; + } + distributedObjects = lappend(distributedObjects, objName); + } + return distributedObjects; +} + + +/* + * PreprocessAlterTextSearchConfigurationStmt verifies if the configuration being altered + * is distributed in the cluster. If that is the case it will prepare the list of commands + * to send to the worker to apply the same changes remotely.
+ */ +List * +PreprocessAlterTextSearchConfigurationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + const char *alterStmtSql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) alterStmtSql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PreprocessRenameTextSearchConfigurationStmt verifies if the configuration being altered + * is distributed in the cluster. If that is the case it will prepare the list of commands + * to send to the worker to apply the same changes remote. + */ +List * +PreprocessRenameTextSearchConfigurationStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + + char *ddlCommand = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) ddlCommand, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PreprocessAlterTextSearchConfigurationSchemaStmt verifies if the configuration being + * altered is distributed in the cluster. If that is the case it will prepare the list of + * commands to send to the worker to apply the same changes remote. 
+ */ +List * +PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, + stmt->missing_ok); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PostprocessAlterTextSearchConfigurationSchemaStmt is invoked after the schema has been + * changed locally. Since changing the schema could result in new dependencies being found + * for this object we re-ensure all the dependencies for the configuration do exist. This + * is solely to propagate the new schema (and all its dependencies) if it was not already + * distributed in the cluster. + */ +List * +PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, const char *queryString) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, + stmt->missing_ok); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + /* dependencies have changed (schema) let's ensure they exist */ + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + +/* + * PreprocessTextSearchConfigurationCommentStmt propagates any comment on a distributed + * configuration to the workers. Since comments for configurations are promenently shown + * when listing all text search configurations this is purely a cosmetic thing when + * running in MX. 
+ */ +List * +PreprocessTextSearchConfigurationCommentStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + const char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_METADATA_NODES, commands); +} + + +/* + * PreprocessAlterTextSearchConfigurationOwnerStmt verifies if the configuration being + * altered is distributed in the cluster. If that is the case it will prepare the list of + * commands to send to the worker to apply the same changes remote. + */ +List * +PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + EnsureCoordinator(); + EnsureSequentialMode(OBJECT_TSCONFIGURATION); + + QualifyTreeNode((Node *) stmt); + char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * PostprocessAlterTextSearchConfigurationOwnerStmt is invoked after the owner has been + * changed locally. Since changing the owner could result in new dependencies being found + * for this object we re-ensure all the dependencies for the configuration do exist. 
This + * is solely to propagate the new owner (and all its dependencies) if it was not already + * distributed in the cluster. + */ +List * +PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, const char *queryString) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + ObjectAddress address = GetObjectAddressFromParseTree((Node *) stmt, false); + if (!ShouldPropagateObject(&address)) + { + return NIL; + } + + /* dependencies have changed (owner) let's ensure they exist */ + EnsureDependenciesExistOnAllNodes(&address); + + return NIL; +} + + +/* + * GetTextSearchConfigDefineStmt returns the DefineStmt for a TEXT SEARCH CONFIGURATION + * based on the configuration as defined in the catalog identified by tsconfigOid. + * + * This statement will only contain the parser, as all other properties for text search + * configurations are stored as mappings in a different catalog. + */ +static DefineStmt * +GetTextSearchConfigDefineStmt(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", + tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + + DefineStmt *stmt = makeNode(DefineStmt); + stmt->kind = OBJECT_TSCONFIGURATION; + + stmt->defnames = get_ts_config_namelist(tsconfigOid); + + List *parserNameList = get_ts_parser_namelist(config->cfgparser); + TypeName *parserTypeName = makeTypeNameFromNameList(parserNameList); + stmt->definition = list_make1(makeDefElem("parser", (Node *) parserTypeName, -1)); + + ReleaseSysCache(tup); + return stmt; +} + + +/* + * GetTextSearchConfigCommentStmt returns a list containing all entries to recreate a + * comment on the configuration identified by tsconfigOid. The list could be empty if + * there is no comment on a configuration. 
+ * + * The reason for a list is for easy use when building a list of all statements to invoke + * to recreate the text search configuration. An empty list can easily be concatinated + * without inspection, contrary to a NULL ptr if we would return the CommentStmt struct. + */ +static List * +GetTextSearchConfigCommentStmt(Oid tsconfigOid) +{ + char *comment = GetComment(tsconfigOid, TSConfigRelationId, 0); + if (!comment) + { + return NIL; + } + + CommentStmt *stmt = makeNode(CommentStmt); + stmt->objtype = OBJECT_TSCONFIGURATION; + + stmt->object = (Node *) get_ts_config_namelist(tsconfigOid); + stmt->comment = comment; + return list_make1(stmt); +} + + +/* + * GetTextSearchConfigMappingStmt returns a list of all mappings from token_types to + * dictionaries configured on a text search configuration identified by tsconfigOid. + * + * Many mappings can exist on a configuration which all require their own statement to + * recreate. + */ +static List * +GetTextSearchConfigMappingStmt(Oid tsconfigOid) +{ + ScanKeyData mapskey = { 0 }; + + /* mapcfg = tsconfigOid */ + ScanKeyInit(&mapskey, + Anum_pg_ts_config_map_mapcfg, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(tsconfigOid)); + + Relation maprel = table_open(TSConfigMapRelationId, AccessShareLock); + Relation mapidx = index_open(TSConfigMapIndexId, AccessShareLock); + SysScanDesc mapscan = systable_beginscan_ordered(maprel, mapidx, NULL, 1, &mapskey); + + List *stmts = NIL; + AlterTSConfigurationStmt *stmt = NULL; + + /* + * We iterate the config mappings on the index order filtered by mapcfg. Meaning we + * get equal maptokentype's in 1 run. By comparing the current tokentype to the last + * we know when we can create a new stmt and append the previous constructed one to + * the list. + */ + int lastTokType = -1; + + /* + * We read all mappings filtered by config id, hence we only need to load the name + * once and can reuse for every statement. 
+ */ + List *configName = get_ts_config_namelist(tsconfigOid); + + Oid parserOid = get_ts_config_parser_oid(tsconfigOid); + + HeapTuple maptup = NULL; + while ((maptup = systable_getnext_ordered(mapscan, ForwardScanDirection)) != NULL) + { + Form_pg_ts_config_map cfgmap = (Form_pg_ts_config_map) GETSTRUCT(maptup); + if (lastTokType != cfgmap->maptokentype) + { + /* creating a new statement, appending the previous one (if existing) */ + if (stmt != NULL) + { + stmts = lappend(stmts, stmt); + } + + stmt = makeNode(AlterTSConfigurationStmt); + stmt->cfgname = configName; + stmt->kind = ALTER_TSCONFIG_ADD_MAPPING; + stmt->tokentype = list_make1(makeString( + get_ts_parser_tokentype_name(parserOid, + cfgmap-> + maptokentype))); + + lastTokType = cfgmap->maptokentype; + } + + stmt->dicts = lappend(stmt->dicts, get_ts_dict_namelist(cfgmap->mapdict)); + } + + /* + * If we have ran atleast 1 iteration above we have the last stmt not added to the + * stmts list. + */ + if (stmt != NULL) + { + stmts = lappend(stmts, stmt); + stmt = NULL; + } + + systable_endscan_ordered(mapscan); + index_close(mapidx, NoLock); + table_close(maprel, NoLock); + + return stmts; +} + + +/* + * GetTextSearchConfigOwnerStmts returns a potentially empty list of statements to change + * the ownership of a TEXT SEARCH CONFIGURATION object. + * + * The list is for convenienve when building a full list of statements to recreate the + * configuration. 
+ */ +static List * +GetTextSearchConfigOwnerStmts(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", + tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + + AlterOwnerStmt *stmt = makeNode(AlterOwnerStmt); + stmt->objectType = OBJECT_TSCONFIGURATION; + stmt->object = (Node *) get_ts_config_namelist(tsconfigOid); + stmt->newowner = GetRoleSpecObjectForUser(config->cfgowner); + + ReleaseSysCache(tup); + return list_make1(stmt); +} + + +/* + * get_ts_config_namelist based on the tsconfigOid this function creates the namelist that + * identifies the configuration in a fully qualified manner, irregardless of the schema + * existing on the search_path. + */ +List * +get_ts_config_namelist(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", + tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + + char *schema = get_namespace_name(config->cfgnamespace); + char *configName = pstrdup(NameStr(config->cfgname)); + List *names = list_make2(makeString(schema), makeString(configName)); + + ReleaseSysCache(tup); + return names; +} + + +/* + * get_ts_dict_namelist based on the tsdictOid this function creates the namelist that + * identifies the dictionary in a fully qualified manner, irregardless of the schema + * existing on the search_path. 
+ */ +static List * +get_ts_dict_namelist(Oid tsdictOid) +{ + HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search dictionary %u", tsdictOid); + } + Form_pg_ts_dict dict = (Form_pg_ts_dict) GETSTRUCT(tup); + + char *schema = get_namespace_name(dict->dictnamespace); + char *dictName = pstrdup(NameStr(dict->dictname)); + List *names = list_make2(makeString(schema), makeString(dictName)); + + ReleaseSysCache(tup); + return names; +} + + +/* + * get_ts_config_parser_oid based on the tsconfigOid this function returns the Oid of the + * parser used in the configuration. + */ +static Oid +get_ts_config_parser_oid(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search configuration %u", tsconfigOid); + } + Form_pg_ts_config config = (Form_pg_ts_config) GETSTRUCT(tup); + Oid parserOid = config->cfgparser; + + ReleaseSysCache(tup); + return parserOid; +} + + +/* + * get_ts_parser_tokentype_name returns the name of the token as known to the parser by + * its tokentype identifier. The parser used to resolve the token name is identified by + * parserOid and should be the same that emitted the tokentype to begin with. 
+ */ +static char * +get_ts_parser_tokentype_name(Oid parserOid, int32 tokentype) +{ + TSParserCacheEntry *parserCache = lookup_ts_parser_cache(parserOid); + if (!OidIsValid(parserCache->lextypeOid)) + { + elog(ERROR, "method lextype isn't defined for text search parser %u", parserOid); + } + + /* take lextypes from parser */ + LexDescr *tokenlist = (LexDescr *) DatumGetPointer( + OidFunctionCall1(parserCache->lextypeOid, Int32GetDatum(0))); + + /* and find the one with lexid = tokentype */ + int tokenIndex = 0; + while (tokenlist && tokenlist[tokenIndex].lexid) + { + if (tokenlist[tokenIndex].lexid == tokentype) + { + return pstrdup(tokenlist[tokenIndex].alias); + } + tokenIndex++; + } + + /* we haven't found the token */ + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("token type \"%d\" does not exist in parser", tokentype))); +} + + +/* + * get_ts_parser_namelist based on the tsparserOid this function creates the namelist that + * identifies the parser in a fully qualified manner, irregardless of the schema existing + * on the search_path. + */ +static List * +get_ts_parser_namelist(Oid tsparserOid) +{ + HeapTuple tup = SearchSysCache1(TSPARSEROID, ObjectIdGetDatum(tsparserOid)); + if (!HeapTupleIsValid(tup)) /* should not happen */ + { + elog(ERROR, "cache lookup failed for text search parser %u", + tsparserOid); + } + Form_pg_ts_parser parser = (Form_pg_ts_parser) GETSTRUCT(tup); + + char *schema = get_namespace_name(parser->prsnamespace); + char *parserName = pstrdup(NameStr(parser->prsname)); + List *names = list_make2(makeString(schema), makeString(parserName)); + + ReleaseSysCache(tup); + return names; +} + + +/* + * CreateTextSearchConfigurationObjectAddress resolves the ObjectAddress for the object + * being created. If missing_pk is false the function will error, explaining to the user + * the text search configuration described in the statement doesn't exist. 
+ */ +ObjectAddress +CreateTextSearchConfigurationObjectAddress(Node *node, bool missing_ok) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + Assert(stmt->kind == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(stmt->defnames, missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * RenameTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION being renamed. Optionally errors if the configuration does not + * exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +RenameTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * AlterTextSearchConfigurationStmtObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION being altered. Optionally errors if the configuration does not + * exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +AlterTextSearchConfigurationStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + Oid objid = get_ts_config_oid(stmt->cfgname, missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * AlterTextSearchConfigurationSchemaStmtObjectAddress resolves the ObjectAddress for the + * TEXT SEARCH CONFIGURATION being moved to a different schema. Optionally errors if the + * configuration does not exist based on the missing_ok flag passed in by the caller. 
+ * + * This can be called, either before or after the move of schema has been executed, hence + * the triple checking before the error might be thrown. Errors for non-existing schema's + * in edgecases will be raised by postgres while executing the move. + */ +ObjectAddress +AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, bool missing_ok) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(castNode(List, stmt->object), true); + + if (!OidIsValid(objid)) + { + /* + * couldn't find the text search configuration, might have already been moved to + * the new schema, we construct a new sequence name that uses the new schema to + * search in. + */ + char *schemaname = NULL; + char *config_name = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaname, &config_name); + + char *newSchemaName = stmt->newschema; + List *names = list_make2(makeString(newSchemaName), makeString(config_name)); + objid = get_ts_config_oid(names, true); + + if (!missing_ok && !OidIsValid(objid)) + { + /* + * if the text search config id is still invalid we couldn't find it, error + * with the same message postgres would error with if missing_ok is false + * (not ok to miss) + */ + + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("text search configuration \"%s\" does not exist", + NameListToString(castNode(List, stmt->object))))); + } + } + + ObjectAddress sequenceAddress = { 0 }; + ObjectAddressSet(sequenceAddress, TSConfigRelationId, objid); + return sequenceAddress; +} + + +/* + * TextSearchConfigurationCommentObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the + * configuration does not exist based on the missing_ok flag passed in by the caller. 
+ */ +ObjectAddress +TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSCONFIGURATION); + + Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok); + + ObjectAddress address = { 0 }; + ObjectAddressSet(address, TSConfigRelationId, objid); + return address; +} + + +/* + * AlterTextSearchConfigurationOwnerObjectAddress resolves the ObjectAddress for the TEXT + * SEARCH CONFIGURATION for which the owner is changed. Optionally errors if the + * configuration does not exist based on the missing_ok flag passed in by the caller. + */ +ObjectAddress +AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Relation relation = NULL; + + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + return get_object_address(stmt->objectType, stmt->object, &relation, AccessShareLock, + missing_ok); +} + + +/* + * GenerateBackupNameForTextSearchConfiguration generates a safe name that is not in use + * already that can be used to rename an existing TEXT SEARCH CONFIGURATION to allow the + * configuration with a specific name to be created, even if this would not have been + * possible due to name collisions. 
+ */ +char * +GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address) +{ + Assert(address->classId == TSConfigRelationId); + List *names = get_ts_config_namelist(address->objectId); + + RangeVar *rel = makeRangeVarFromNameList(names); + + char *newName = palloc0(NAMEDATALEN); + char suffix[NAMEDATALEN] = { 0 }; + char *baseName = rel->relname; + int baseLength = strlen(baseName); + int count = 0; + + while (true) + { + int suffixLength = SafeSnprintf(suffix, NAMEDATALEN - 1, "(citus_backup_%d)", + count); + + /* trim the base name at the end to leave space for the suffix and trailing \0 */ + baseLength = Min(baseLength, NAMEDATALEN - suffixLength - 1); + + /* clear newName before copying the potentially trimmed baseName and suffix */ + memset(newName, 0, NAMEDATALEN); + strncpy_s(newName, NAMEDATALEN, baseName, baseLength); + strncpy_s(newName + baseLength, NAMEDATALEN - baseLength, suffix, + suffixLength); + + + rel->relname = newName; + List *newNameList = MakeNameListFromRangeVar(rel); + + Oid tsconfigOid = get_ts_config_oid(newNameList, true); + if (!OidIsValid(tsconfigOid)) + { + return newName; + } + + count++; + } +} diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index 815a90f93..48d1dd10e 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -267,13 +267,17 @@ ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement) ErrorIfIllegallyChangingKnownShard(relationId); - if (IsCitusTable(relationId) && IsForeignTable(relationId)) + /* + * We allow truncating foreign tables that are added to metadata + * only on the coordinator, as user mappings are not propagated. 
+ */ + if (IsForeignTable(relationId) && + IsCitusTableType(relationId, CITUS_LOCAL_TABLE) && + !IsCoordinator()) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("truncating distributed foreign tables is " - "currently unsupported"), - errhint("Consider undistributing table before TRUNCATE, " - "and then distribute or add to metadata again"))); + errmsg("truncating foreign tables that are added to metadata " + "can only be excuted on the coordinator"))); } } } diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index 524124285..eb59e8522 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -957,6 +957,20 @@ CreateTypeDDLCommandsIdempotent(const ObjectAddress *typeAddress) return NIL; } + HeapTuple tup = SearchSysCacheCopy1(TYPEOID, ObjectIdGetDatum(typeAddress->objectId)); + if (!HeapTupleIsValid(tup)) + { + elog(ERROR, "cache lookup failed for type %u", typeAddress->objectId); + } + + /* Don't send any command if the type is a table's row type */ + Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup); + if (typTup->typtype == TYPTYPE_COMPOSITE && + get_rel_relkind(typTup->typrelid) != RELKIND_COMPOSITE_TYPE) + { + return NIL; + } + Node *stmt = CreateTypeStmtByObjectAddress(typeAddress); /* capture ddl command for recreation and wrap in create if not exists construct */ diff --git a/src/backend/distributed/deparser/deparse.c b/src/backend/distributed/deparser/deparse.c index cff1d0b16..8312d6407 100644 --- a/src/backend/distributed/deparser/deparse.c +++ b/src/backend/distributed/deparser/deparse.c @@ -17,6 +17,7 @@ #include "distributed/commands.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" /* * DeparseTreeNode aims to be the inverse of postgres' ParseTreeNode. 
Currently with @@ -35,3 +36,20 @@ DeparseTreeNode(Node *stmt) return ops->deparse(stmt); } + + +/* + * DeparseTreeNodes deparses all stmts in the list from the statement datastructure into + * sql statements. + */ +List * +DeparseTreeNodes(List *stmts) +{ + List *sqls = NIL; + Node *stmt = NULL; + foreach_ptr(stmt, stmts) + { + sqls = lappend(sqls, DeparseTreeNode(stmt)); + } + return sqls; +} diff --git a/src/backend/distributed/deparser/deparse_text_search.c b/src/backend/distributed/deparser/deparse_text_search.c new file mode 100644 index 000000000..e1ac44f5a --- /dev/null +++ b/src/backend/distributed/deparser/deparse_text_search.c @@ -0,0 +1,377 @@ +/*------------------------------------------------------------------------- + * + * deparse_text_search.c + * All routines to deparse text search statements. + * This file contains all entry points specific for text search statement deparsing. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "utils/builtins.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" + +static void AppendDefElemList(StringInfo buf, List *defelms); + +static void AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes); +static void AppendStringInfoDictnames(StringInfo buf, List *dicts); + + +/* + * DeparseCreateTextSearchStmt returns the sql for a DefineStmt defining a TEXT SEARCH + * CONFIGURATION + * + * Although the syntax is mutually exclusive on the two arguments that can be passed in + * the deparser will syntactically correct multiple definitions if provided. 
* + */ +char * +DeparseCreateTextSearchStmt(Node *node) +{ + DefineStmt *stmt = castNode(DefineStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + const char *identifier = NameListToQuotedString(stmt->defnames); + appendStringInfo(&buf, "CREATE TEXT SEARCH CONFIGURATION %s ", identifier); + appendStringInfoString(&buf, "("); + AppendDefElemList(&buf, stmt->definition); + appendStringInfoString(&buf, ");"); + + return buf.data; +} + + +/* + * AppendDefElemList specialization to append a comma separated list of definitions to a + * define statement. + * + * Currently only supports String and TypeName entries. Will error on others. + */ +static void +AppendDefElemList(StringInfo buf, List *defelems) +{ + DefElem *defelem = NULL; + bool first = true; + foreach_ptr(defelem, defelems) + { + if (!first) + { + appendStringInfoString(buf, ", "); + } + first = false; + + /* extract identifier from defelem */ + const char *identifier = NULL; + switch (nodeTag(defelem->arg)) + { + case T_String: + { + identifier = quote_identifier(strVal(defelem->arg)); + break; + } + + case T_TypeName: + { + TypeName *typeName = castNode(TypeName, defelem->arg); + identifier = NameListToQuotedString(typeName->names); + break; + } + + default: + { + ereport(ERROR, (errmsg("unexpected argument during deparsing of " + "TEXT SEARCH CONFIGURATION definition"))); + } + } + + /* stringify */ + appendStringInfo(buf, "%s = %s", defelem->defname, identifier); + } +} + + +/* + * DeparseDropTextSearchConfigurationStmt returns the sql representation for a DROP TEXT + * SEARCH CONFIGURATION ... statment. Supports dropping multiple configurations at once. 
+ */ +char * +DeparseDropTextSearchConfigurationStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSCONFIGURATION); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfoString(&buf, "DROP TEXT SEARCH CONFIGURATION "); + List *nameList = NIL; + bool first = true; + foreach_ptr(nameList, stmt->objects) + { + if (!first) + { + appendStringInfoString(&buf, ", "); + } + first = false; + + appendStringInfoString(&buf, NameListToQuotedString(nameList)); + } + + if (stmt->behavior == DROP_CASCADE) + { + appendStringInfoString(&buf, " CASCADE"); + } + + appendStringInfoString(&buf, ";"); + + return buf.data; +} + + +/* + * DeparseRenameTextSearchConfigurationStmt returns the sql representation of a ALTER TEXT + * SEARCH CONFIGURATION ... RENAME TO ... statement. + */ +char * +DeparseRenameTextSearchConfigurationStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSCONFIGURATION); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + char *identifier = NameListToQuotedString(castNode(List, stmt->object)); + appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s RENAME TO %s;", + identifier, quote_identifier(stmt->newname)); + + return buf.data; +} + + +/* + * DeparseAlterTextSearchConfigurationStmt returns the ql representation of any generic + * ALTER TEXT SEARCH CONFIGURATION .... statement. The statements supported include: + * - ALTER TEXT SEARCH CONFIGURATIONS ... ADD MAPPING FOR [, ...] WITH [, ...] + * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING FOR [, ...] WITH [, ...] + * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING REPLACE ... WITH ... + * - ALTER TEXT SEARCH CONFIGURATIONS ... ALTER MAPPING FOR [, ...] REPLACE ... WITH ... + * - ALTER TEXT SEARCH CONFIGURATIONS ... DROP MAPPING [ IF EXISTS ] FOR ... 
+ */ +char * +DeparseAlterTextSearchConfigurationStmt(Node *node) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + char *identifier = NameListToQuotedString(castNode(List, stmt->cfgname)); + appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s", identifier); + + switch (stmt->kind) + { + case ALTER_TSCONFIG_ADD_MAPPING: + { + appendStringInfoString(&buf, " ADD MAPPING FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + + appendStringInfoString(&buf, " WITH "); + AppendStringInfoDictnames(&buf, stmt->dicts); + + break; + } + + case ALTER_TSCONFIG_ALTER_MAPPING_FOR_TOKEN: + { + appendStringInfoString(&buf, " ALTER MAPPING FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + + appendStringInfoString(&buf, " WITH "); + AppendStringInfoDictnames(&buf, stmt->dicts); + + break; + } + + case ALTER_TSCONFIG_REPLACE_DICT: + case ALTER_TSCONFIG_REPLACE_DICT_FOR_TOKEN: + { + appendStringInfoString(&buf, " ALTER MAPPING"); + if (list_length(stmt->tokentype) > 0) + { + appendStringInfoString(&buf, " FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + } + + if (list_length(stmt->dicts) != 2) + { + elog(ERROR, "unexpected number of dictionaries while deparsing ALTER " + "TEXT SEARCH CONFIGURATION ... ALTER MAPPING [FOR ...] 
REPLACE " + "statement."); + } + + appendStringInfo(&buf, " REPLACE %s", + NameListToQuotedString(linitial(stmt->dicts))); + + appendStringInfo(&buf, " WITH %s", + NameListToQuotedString(lsecond(stmt->dicts))); + + break; + } + + case ALTER_TSCONFIG_DROP_MAPPING: + { + appendStringInfoString(&buf, " DROP MAPPING"); + + if (stmt->missing_ok) + { + appendStringInfoString(&buf, " IF EXISTS"); + } + + appendStringInfoString(&buf, " FOR "); + AppendStringInfoTokentypeList(&buf, stmt->tokentype); + break; + } + + default: + { + elog(ERROR, "unable to deparse unsupported ALTER TEXT SEARCH STATEMENT"); + } + } + + appendStringInfoString(&buf, ";"); + + return buf.data; +} + + +/* + * DeparseAlterTextSearchConfigurationSchemaStmt returns the sql statement representing + * ALTER TEXT SEARCH CONFIGURATION ... SET SCHEMA ... statements. + */ +char * +DeparseAlterTextSearchConfigurationSchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s SET SCHEMA %s;", + NameListToQuotedString(castNode(List, stmt->object)), + quote_identifier(stmt->newschema)); + + return buf.data; +} + + +/* + * DeparseTextSearchConfigurationCommentStmt returns the sql statement representing + * COMMENT ON TEXT SEARCH CONFIGURATION ... IS ... 
+ */ +char * +DeparseTextSearchConfigurationCommentStmt(Node *node) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSCONFIGURATION); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "COMMENT ON TEXT SEARCH CONFIGURATION %s IS ", + NameListToQuotedString(castNode(List, stmt->object))); + + if (stmt->comment == NULL) + { + appendStringInfoString(&buf, "NULL"); + } + else + { + appendStringInfoString(&buf, quote_literal_cstr(stmt->comment)); + } + + appendStringInfoString(&buf, ";"); + + return buf.data; +} + + +/* + * AppendStringInfoTokentypeList specializes in adding a comma separated list of + * token_tyoe's to TEXT SEARCH CONFIGURATION commands + */ +static void +AppendStringInfoTokentypeList(StringInfo buf, List *tokentypes) +{ + Value *tokentype = NULL; + bool first = true; + foreach_ptr(tokentype, tokentypes) + { + if (nodeTag(tokentype) != T_String) + { + elog(ERROR, + "unexpected tokentype for deparsing in text search configuration"); + } + + if (!first) + { + appendStringInfoString(buf, ", "); + } + first = false; + + appendStringInfoString(buf, strVal(tokentype)); + } +} + + +/* + * AppendStringInfoDictnames specializes in appending a comma separated list of + * dictionaries to TEXT SEARCH CONFIGURATION commands. + */ +static void +AppendStringInfoDictnames(StringInfo buf, List *dicts) +{ + List *dictNames = NIL; + bool first = true; + foreach_ptr(dictNames, dicts) + { + if (!first) + { + appendStringInfoString(buf, ", "); + } + first = false; + + char *dictIdentifier = NameListToQuotedString(dictNames); + appendStringInfoString(buf, dictIdentifier); + } +} + + +/* + * DeparseAlterTextSearchConfigurationOwnerStmt returns the sql statement representing + * ALTER TEXT SEARCH CONFIGURATION ... ONWER TO ... commands. 
+ */ +char * +DeparseAlterTextSearchConfigurationOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + appendStringInfo(&buf, "ALTER TEXT SEARCH CONFIGURATION %s OWNER TO %s;", + NameListToQuotedString(castNode(List, stmt->object)), + RoleSpecString(stmt->newowner, true)); + + return buf.data; +} diff --git a/src/backend/distributed/deparser/qualify_text_search_stmts.c b/src/backend/distributed/deparser/qualify_text_search_stmts.c new file mode 100644 index 000000000..42c98039a --- /dev/null +++ b/src/backend/distributed/deparser/qualify_text_search_stmts.c @@ -0,0 +1,278 @@ +/*------------------------------------------------------------------------- + * + * qualify_text_search_stmts.c + * Functions specialized in fully qualifying all text search statements. These + * functions are dispatched from qualify.c + * + * Fully qualifying text search statements consists of adding the schema name + * to the subject of the types as well as any other branch of the parsetree. + * + * Goal would be that the deparser functions for these statements can + * serialize the statement without any external lookups. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/htup_details.h" +#include "catalog/namespace.h" +#include "catalog/pg_ts_config.h" +#include "catalog/pg_ts_dict.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "distributed/deparser.h" +#include "distributed/listutils.h" + +static Oid get_ts_config_namespace(Oid tsconfigOid); +static Oid get_ts_dict_namespace(Oid tsdictOid); + + +/* + * QualifyDropTextSearchConfigurationStmt adds any missing schema names to text search + * configurations being dropped. All configurations are expected to exists before fully + * qualifying the statement. 
Errors will be raised for objects not existing. Non-existing + * objects are expected to not be distributed. + */ +void +QualifyDropTextSearchConfigurationStmt(Node *node) +{ + DropStmt *stmt = castNode(DropStmt, node); + Assert(stmt->removeType == OBJECT_TSCONFIGURATION); + + List *qualifiedObjects = NIL; + List *objName = NIL; + + foreach_ptr(objName, stmt->objects) + { + char *schemaName = NULL; + char *tsconfigName = NULL; + DeconstructQualifiedName(objName, &schemaName, &tsconfigName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(objName, false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + objName = list_make2(makeString(schemaName), + makeString(tsconfigName)); + } + + qualifiedObjects = lappend(qualifiedObjects, objName); + } + + stmt->objects = qualifiedObjects; +} + + +/* + * QualifyAlterTextSearchConfigurationStmt adds the schema name (if missing) to the name + * of the text search configurations, as well as the dictionaries referenced. 
+ */ +void +QualifyAlterTextSearchConfigurationStmt(Node *node) +{ + AlterTSConfigurationStmt *stmt = castNode(AlterTSConfigurationStmt, node); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(stmt->cfgname, &schemaName, &objName); + + /* fully qualify the cfgname being altered */ + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(stmt->cfgname, false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->cfgname = list_make2(makeString(schemaName), + makeString(objName)); + } + + /* fully qualify the dicts */ + bool useNewDicts = false; + List *dicts = NULL; + List *dictName = NIL; + foreach_ptr(dictName, stmt->dicts) + { + DeconstructQualifiedName(dictName, &schemaName, &objName); + + /* fully qualify the cfgname being altered */ + if (!schemaName) + { + Oid dictOid = get_ts_dict_oid(dictName, false); + Oid namespaceOid = get_ts_dict_namespace(dictOid); + schemaName = get_namespace_name(namespaceOid); + + useNewDicts = true; + dictName = list_make2(makeString(schemaName), makeString(objName)); + } + + dicts = lappend(dicts, dictName); + } + + if (useNewDicts) + { + /* swap original dicts with the new list */ + stmt->dicts = dicts; + } + else + { + /* we don't use the new list, everything was already qualified, free-ing */ + list_free(dicts); + } +} + + +/* + * QualifyRenameTextSearchConfigurationStmt adds the schema name (if missing) to the + * configuration being renamed. The new name will kept be without schema name since this + * command cannot be used to change the schema of a configuration. 
+ */ +void +QualifyRenameTextSearchConfigurationStmt(Node *node) +{ + RenameStmt *stmt = castNode(RenameStmt, node); + Assert(stmt->renameType == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + /* fully qualify the cfgname being altered */ + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * QualifyAlterTextSearchConfigurationSchemaStmt adds the schema name (if missing) for the + * text search being moved to a new schema. + */ +void +QualifyAlterTextSearchConfigurationSchemaStmt(Node *node) +{ + AlterObjectSchemaStmt *stmt = castNode(AlterObjectSchemaStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * QualifyTextSearchConfigurationCommentStmt adds the schema name (if missing) to the + * configuration name on which the comment is created. 
+ */ +void +QualifyTextSearchConfigurationCommentStmt(Node *node) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Assert(stmt->objtype == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * QualifyAlterTextSearchConfigurationOwnerStmt adds the schema name (if missing) to the + * configuration for which the owner is changing. + */ +void +QualifyAlterTextSearchConfigurationOwnerStmt(Node *node) +{ + AlterOwnerStmt *stmt = castNode(AlterOwnerStmt, node); + Assert(stmt->objectType == OBJECT_TSCONFIGURATION); + + char *schemaName = NULL; + char *objName = NULL; + DeconstructQualifiedName(castNode(List, stmt->object), &schemaName, &objName); + + if (!schemaName) + { + Oid tsconfigOid = get_ts_config_oid(castNode(List, stmt->object), false); + Oid namespaceOid = get_ts_config_namespace(tsconfigOid); + schemaName = get_namespace_name(namespaceOid); + + stmt->object = (Node *) list_make2(makeString(schemaName), + makeString(objName)); + } +} + + +/* + * get_ts_config_namespace returns the oid of the namespace which is housing the text + * search configuration identified by tsconfigOid. 
+ */ +static Oid +get_ts_config_namespace(Oid tsconfigOid) +{ + HeapTuple tup = SearchSysCache1(TSCONFIGOID, ObjectIdGetDatum(tsconfigOid)); + + if (HeapTupleIsValid(tup)) + { + Form_pg_ts_config cfgform = (Form_pg_ts_config) GETSTRUCT(tup); + Oid namespaceOid = cfgform->cfgnamespace; + ReleaseSysCache(tup); + + return namespaceOid; + } + + return InvalidOid; +} + + +/* + * get_ts_dict_namespace returns the oid of the namespace which is housing the text + * search dictionary identified by tsdictOid. + */ +static Oid +get_ts_dict_namespace(Oid tsdictOid) +{ + HeapTuple tup = SearchSysCache1(TSDICTOID, ObjectIdGetDatum(tsdictOid)); + + if (HeapTupleIsValid(tup)) + { + Form_pg_ts_dict cfgform = (Form_pg_ts_dict) GETSTRUCT(tup); + Oid namespaceOid = cfgform->dictnamespace; + ReleaseSysCache(tup); + + return namespaceOid; + } + + return InvalidOid; +} diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index cc7f8d3ac..28498e0f2 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -237,16 +237,17 @@ CitusExecutorRun(QueryDesc *queryDesc, * transactions. */ CitusTableCacheFlushInvalidatedEntries(); - - /* - * Within a 2PC, when a function is delegated to a remote node, we pin - * the distribution argument as the shard key for all the SQL in the - * function's block. The restriction is imposed to not to access other - * nodes from the current node and violate the transactional integrity - * of the 2PC. Now that the query is ending, reset the shard key to NULL. - */ - ResetAllowedShardKeyValue(); + InTopLevelDelegatedFunctionCall = false; } + + /* + * Within a 2PC, when a function is delegated to a remote node, we pin + * the distribution argument as the shard key for all the SQL in the + * function's block. 
The restriction is imposed to not to access other + * nodes from the current node, and violate the transactional integrity + * of the 2PC. Now that the query is ending, reset the shard key to NULL. + */ + CheckAndResetAllowedShardKeyValueIfNeeded(); } PG_CATCH(); { @@ -260,13 +261,15 @@ CitusExecutorRun(QueryDesc *queryDesc, if (ExecutorLevel == 0 && PlannerLevel == 0) { - /* - * In case of an exception, reset the pinned shard-key, for more - * details see the function header. - */ - ResetAllowedShardKeyValue(); + InTopLevelDelegatedFunctionCall = false; } + /* + * In case of an exception, reset the pinned shard-key, for more + * details see the function header. + */ + CheckAndResetAllowedShardKeyValueIfNeeded(); + PG_RE_THROW(); } PG_END_TRY(); @@ -770,6 +773,11 @@ GetObjectTypeString(ObjectType objType) return "schema"; } + case OBJECT_TSCONFIGURATION: + { + return "text search configuration"; + } + case OBJECT_TYPE: { return "type"; diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index a9a154242..2d1f67ad3 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -124,6 +124,7 @@ typedef struct ViewDependencyNode static List * GetRelationSequenceDependencyList(Oid relationId); static List * GetRelationTriggerFunctionDependencyList(Oid relationId); static List * GetRelationStatsSchemaDependencyList(Oid relationId); +static List * GetRelationIndicesDependencyList(Oid relationId); static DependencyDefinition * CreateObjectAddressDependencyDef(Oid classId, Oid objectId); static List * CreateObjectAddressDependencyDefList(Oid classId, List *objectIdList); static ObjectAddress DependencyDefinitionObjectAddress(DependencyDefinition *definition); @@ -155,6 +156,8 @@ static bool FollowAllSupportedDependencies(ObjectAddressCollector *collector, DependencyDefinition *definition); static bool FollowNewSupportedDependencies(ObjectAddressCollector *collector, 
DependencyDefinition *definition); +static bool FollowAllDependencies(ObjectAddressCollector *collector, + DependencyDefinition *definition); static void ApplyAddToDependencyList(ObjectAddressCollector *collector, DependencyDefinition *definition); static List * ExpandCitusSupportedTypes(ObjectAddressCollector *collector, @@ -211,15 +214,42 @@ GetDependenciesForObject(const ObjectAddress *target) /* - * GetAllDependenciesForObject returns a list of all the ObjectAddresses to be - * created in order before the target object could safely be created on a - * worker. As a caller, you probably need GetDependenciesForObject() which - * eliminates already distributed objects from the returned list. + * GetAllSupportedDependenciesForObject returns a list of all the ObjectAddresses to be + * created in order before the target object could safely be created on a worker, if all + * dependent objects are distributable. As a caller, you probably need to use + * GetDependenciesForObject() which eliminates already distributed objects from the returned + * list. * * Some of the object might already be created on a worker. It should be created * in an idempotent way. */ List * +GetAllSupportedDependenciesForObject(const ObjectAddress *target) +{ + ObjectAddressCollector collector = { 0 }; + InitObjectAddressCollector(&collector); + + RecurseObjectDependencies(*target, + &ExpandCitusSupportedTypes, + &FollowAllSupportedDependencies, + &ApplyAddToDependencyList, + &collector); + + return collector.dependencyList; +} + + +/* + * GetAllDependenciesForObject returns a list of all the dependent objects of the given + * object irrespective of whether the dependent object is supported by Citus or not, if + * the object can be found as dependency with RecurseObjectDependencies and + * ExpandCitusSupportedTypes. + * + * This function will be used to provide meaningful error messages if any dependent + * object for a given object is not supported. 
If you want to create dependencies for + * an object, you probably need to use GetDependenciesForObject(). + */ +List * GetAllDependenciesForObject(const ObjectAddress *target) { ObjectAddressCollector collector = { 0 }; @@ -227,7 +257,7 @@ GetAllDependenciesForObject(const ObjectAddress *target) RecurseObjectDependencies(*target, &ExpandCitusSupportedTypes, - &FollowAllSupportedDependencies, + &FollowAllDependencies, &ApplyAddToDependencyList, &collector); @@ -639,6 +669,11 @@ SupportedDependencyByCitus(const ObjectAddress *address) return true; } + case OCLASS_TSCONFIG: + { + return true; + } + case OCLASS_TYPE: { switch (get_typtype(address->objectId)) @@ -686,7 +721,8 @@ SupportedDependencyByCitus(const ObjectAddress *address) relKind == RELKIND_RELATION || relKind == RELKIND_PARTITIONED_TABLE || relKind == RELKIND_FOREIGN_TABLE || - relKind == RELKIND_SEQUENCE) + relKind == RELKIND_SEQUENCE || + relKind == RELKIND_INDEX) { return true; } @@ -896,10 +932,61 @@ FollowAllSupportedDependencies(ObjectAddressCollector *collector, /* - * ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will collect - * all the ObjectAddresses for pg_depend entries to the context. The context here is - * assumed to be a (ObjectAddressCollector *) to the location where all ObjectAddresses - * will be collected. + * FollowAllDependencies applies filters on pg_depend entries to follow the dependency + * tree of objects in depth-first order. We will visit all objects irrespective of whether + * they are supported by Citus or not. + */ +static bool +FollowAllDependencies(ObjectAddressCollector *collector, + DependencyDefinition *definition) +{ + if (definition->mode == DependencyPgDepend) + { + /* + * For dependencies found in pg_depend: + * + * Follow only normal and extension dependencies. The latter is used to reach the + * extensions, the objects that directly depend on the extension are eliminated + * during the "apply" phase. 
+ * + * Other dependencies are internal dependencies and managed by postgres. + */ + if (definition->data.pg_depend.deptype != DEPENDENCY_NORMAL && + definition->data.pg_depend.deptype != DEPENDENCY_EXTENSION) + { + return false; + } + } + + /* rest of the tests are to see if we want to follow the actual dependency */ + ObjectAddress address = DependencyDefinitionObjectAddress(definition); + + /* + * If the object is already in our dependency list we do not have to follow any + * further + */ + if (IsObjectAddressCollected(address, collector)) + { + return false; + } + + if (CitusExtensionObject(&address)) + { + /* following citus extension could complicate role management */ + return false; + } + + return true; +} + + +/* + * ApplyAddToDependencyList is an apply function for RecurseObjectDependencies that will + * collect all the ObjectAddresses for pg_depend entries to the context, except when it is + * an extension-owned one. + * + * The context here is assumed to be a (ObjectAddressCollector *) to the location where + * all ObjectAddresses will be collected. */ static void ApplyAddToDependencyList(ObjectAddressCollector *collector, @@ -1005,6 +1092,17 @@ ExpandCitusSupportedTypes(ObjectAddressCollector *collector, ObjectAddress targe List *sequenceDependencyList = GetRelationSequenceDependencyList(relationId); result = list_concat(result, sequenceDependencyList); + + /* + * Tables could have indexes. Indexes themselves could have dependencies that + * need to be propagated. e.g. TEXT SEARCH CONFIGURATIONS. Here we add the + * addresses of all indices to the list of objects to visit, so as to make sure we + * create all objects required by the indices before we create the table + * including indices. 
+ */ + + List *indexDependencyList = GetRelationIndicesDependencyList(relationId); + result = list_concat(result, indexDependencyList); } default: @@ -1048,6 +1146,28 @@ GetRelationStatsSchemaDependencyList(Oid relationId) } +/* + * CollectIndexOids implements PGIndexProcessor to create a list of all index oids + */ +static void +CollectIndexOids(Form_pg_index formPgIndex, List **oids, int flags) +{ + *oids = lappend_oid(*oids, formPgIndex->indexrelid); +} + + +/* + * GetRelationIndicesDependencyList creates a list of ObjectAddressDependencies for the + * indexes on a given relation. + */ +static List * +GetRelationIndicesDependencyList(Oid relationId) +{ + List *indexIds = ExecuteFunctionOnEachTableIndex(relationId, CollectIndexOids, 0); + return CreateObjectAddressDependencyDefList(RelationRelationId, indexIds); +} + + /* * GetRelationTriggerFunctionDependencyList returns a list of DependencyDefinition * objects for the functions that triggers of the relation with relationId depends. diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index ba67a073b..41b3b372d 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -405,6 +405,21 @@ GetDistributedObjectAddressList(void) } +/* + * GetRoleSpecObjectForUser creates a RoleSpec object for the given roleOid. + */ +RoleSpec * +GetRoleSpecObjectForUser(Oid roleOid) +{ + RoleSpec *roleSpec = makeNode(RoleSpec); + roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC; + roleSpec->rolename = OidIsValid(roleOid) ? 
GetUserNameFromId(roleOid, false) : NULL; + roleSpec->location = -1; + + return roleSpec; +} + + /* * UpdateDistributedObjectColocationId gets an old and a new colocationId * and updates the colocationId of all tuples in citus.pg_dist_object which diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 161a56942..2265ca691 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -31,6 +31,7 @@ #include "commands/dbcommands.h" #include "commands/extension.h" #include "commands/trigger.h" +#include "distributed/backend_data.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" #include "distributed/citus_ruleutils.h" @@ -3666,9 +3667,10 @@ GetLocalNodeId(void) /* * This is expected if the coordinator is not added to the metadata. - * We'll return 0 for this case and for all cases so views can function almost normally + * We'll return GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA for this case and + * for all cases so views can function almost normally */ - nodeId = 0; + nodeId = GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA; } LocalNodeId = nodeId; diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index ec5ffb3f9..490eeb4ca 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -102,7 +102,6 @@ static GrantStmt * GenerateGrantStmtForRights(ObjectType objectType, bool withGrantOption); static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId); static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission); -static RoleSpec * GetRoleSpecObjectForGrantStmt(Oid roleOid); static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem); static void SetLocalEnableMetadataSync(bool state); @@ -1023,7 +1022,6 @@ 
DistributionCreateCommand(CitusTableCacheEntry *cacheEntry) StringInfo insertDistributionCommand = makeStringInfo(); Oid relationId = cacheEntry->relationId; char distributionMethod = cacheEntry->partitionMethod; - char *partitionKeyString = cacheEntry->partitionKeyString; char *qualifiedRelationName = generate_qualified_relation_name(relationId); uint32 colocationId = cacheEntry->colocationId; @@ -1037,7 +1035,7 @@ DistributionCreateCommand(CitusTableCacheEntry *cacheEntry) else { char *partitionKeyColumnName = - ColumnToColumnName(relationId, partitionKeyString); + ColumnToColumnName(relationId, (Node *) cacheEntry->partitionColumn); appendStringInfo(tablePartitionKeyNameString, "%s", quote_literal_cstr(partitionKeyColumnName)); } @@ -1782,7 +1780,7 @@ GenerateGrantStmtForRights(ObjectType objectType, stmt->objtype = objectType; stmt->objects = GetObjectsForGrantStmt(objectType, objectId); stmt->privileges = list_make1(GetAccessPrivObjectForGrantStmt(permission)); - stmt->grantees = list_make1(GetRoleSpecObjectForGrantStmt(roleOid)); + stmt->grantees = list_make1(GetRoleSpecObjectForUser(roleOid)); stmt->grant_option = withGrantOption; return stmt; @@ -1831,22 +1829,6 @@ GetAccessPrivObjectForGrantStmt(char *permission) } -/* - * GetRoleSpecObjectForGrantStmt creates a RoleSpec object for the given roleOid. - * It will be used when creating GrantStmt objects. - */ -static RoleSpec * -GetRoleSpecObjectForGrantStmt(Oid roleOid) -{ - RoleSpec *roleSpec = makeNode(RoleSpec); - roleSpec->roletype = OidIsValid(roleOid) ? ROLESPEC_CSTRING : ROLESPEC_PUBLIC; - roleSpec->rolename = OidIsValid(roleOid) ? 
GetUserNameFromId(roleOid, false) : NULL; - roleSpec->location = -1; - - return roleSpec; -} - - /* * SetLocalEnableMetadataSync sets the enable_metadata_sync locally */ @@ -2462,12 +2444,10 @@ citus_internal_add_partition_metadata(PG_FUNCTION_ARGS) distributionColumnText = PG_GETARG_TEXT_P(2); distributionColumnString = text_to_cstring(distributionColumnText); - Relation relation = relation_open(relationId, AccessShareLock); distributionColumnVar = - BuildDistributionKeyFromColumnName(relation, distributionColumnString); + BuildDistributionKeyFromColumnName(relationId, distributionColumnString, + AccessShareLock); Assert(distributionColumnVar != NULL); - - relation_close(relation, NoLock); } if (!ShouldSkipMetadataChecks()) diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index a32e0aa20..706f000cb 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -1539,6 +1539,31 @@ FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort) } +/* + * FindNodeWithNodeId searches pg_dist_node and returns the node with the nodeId. + * If the node cannot be found this functions errors. 
+ */ +WorkerNode * +FindNodeWithNodeId(int nodeId) +{ + List *workerList = ActiveReadableNodeList(); + WorkerNode *workerNode = NULL; + + foreach_ptr(workerNode, workerList) + { + if (workerNode->nodeId == nodeId) + { + return workerNode; + } + } + + /* there isn't any node with nodeId in pg_dist_node */ + elog(ERROR, "worker node with node id %d could not be found", nodeId); + + return NULL; +} + + /* * ReadDistNode iterates over pg_dist_node table, converts each row * into it's memory representation (i.e., WorkerNode) and adds them into diff --git a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c index c4da6764a..c2ec4db3a 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c +++ b/src/backend/distributed/metadata/pg_get_object_address_12_13_14.c @@ -410,6 +410,7 @@ ErrorIfCurrentUserCanNotDistributeObject(ObjectType type, ObjectAddress *addr, case OBJECT_FUNCTION: case OBJECT_PROCEDURE: case OBJECT_AGGREGATE: + case OBJECT_TSCONFIGURATION: case OBJECT_TYPE: case OBJECT_FOREIGN_SERVER: case OBJECT_SEQUENCE: diff --git a/src/backend/distributed/operations/citus_tools.c b/src/backend/distributed/operations/citus_tools.c index cb87e70b1..b7905d9f8 100644 --- a/src/backend/distributed/operations/citus_tools.c +++ b/src/backend/distributed/operations/citus_tools.c @@ -14,6 +14,7 @@ #include "access/htup_details.h" #include "catalog/pg_type.h" +#include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" #include "distributed/multi_client_executor.h" @@ -50,8 +51,6 @@ static void ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, bool *statusArray, StringInfo *resultStringArray, int commandCount); -static bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, - char *queryString, StringInfo queryResult); static Tuplestorestate * CreateTupleStore(TupleDesc tupleDescriptor, 
StringInfo *nodeNameArray, int *nodePortArray, bool *statusArray, @@ -474,9 +473,10 @@ ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, int32 nodePort = nodePortArray[commandIndex]; char *queryString = commandStringArray[commandIndex]->data; StringInfo queryResultString = resultStringArray[commandIndex]; + bool reportResultError = false; bool success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString, - queryResultString); + queryResultString, reportResultError); statusArray[commandIndex] = success; @@ -491,9 +491,9 @@ ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, * (success/failure), and query result. The query is expected to return a single * target containing zero or one rows. */ -static bool +bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, - StringInfo queryResultString) + StringInfo queryResultString, bool reportResultError) { int connectionFlags = FORCE_NEW_CONNECTION; MultiConnection *connection = @@ -517,6 +517,11 @@ ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, PGresult *queryResult = GetRemoteCommandResult(connection, raiseInterrupts); bool success = EvaluateQueryResult(connection, queryResult, queryResultString); + if (!success && reportResultError) + { + ReportResultError(connection, queryResult, ERROR); + } + PQclear(queryResult); /* close the connection */ diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 9ea275035..3ca22f3b1 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -723,6 +723,16 @@ FunctionInFromClause(List *fromlist, Query *query) static void EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId) { + /* + * If the distribution key is already set, the key is fixed until + * the force-delegation 
function returns. All nested force-delegation + * functions must use the same key. + */ + if (AllowedDistributionColumnValue.isActive) + { + return; + } + /* * The saved distribution argument need to persist through the life * of the query, both during the planning (where we save) and execution @@ -734,6 +744,7 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId) colocationId)); AllowedDistributionColumnValue.distributionColumnValue = copyObject(distArgument); AllowedDistributionColumnValue.colocationId = colocationId; + AllowedDistributionColumnValue.executorLevel = ExecutorLevel; AllowedDistributionColumnValue.isActive = true; MemoryContextSwitchTo(oldcontext); } @@ -747,15 +758,22 @@ EnableInForceDelegatedFuncExecution(Const *distArgument, uint32 colocationId) * the 2PC. Reset the distribution argument value once the function ends. */ void -ResetAllowedShardKeyValue(void) +CheckAndResetAllowedShardKeyValueIfNeeded(void) { - if (AllowedDistributionColumnValue.isActive) + /* + * If no distribution argument is pinned or the pinned argument was + * set by a nested-executor from upper level, nothing to reset. 
+ */ + if (!AllowedDistributionColumnValue.isActive || + ExecutorLevel > AllowedDistributionColumnValue.executorLevel) { - pfree(AllowedDistributionColumnValue.distributionColumnValue); - AllowedDistributionColumnValue.isActive = false; + return; } - InTopLevelDelegatedFunctionCall = false; + Assert(ExecutorLevel == AllowedDistributionColumnValue.executorLevel); + pfree(AllowedDistributionColumnValue.distributionColumnValue); + AllowedDistributionColumnValue.isActive = false; + AllowedDistributionColumnValue.executorLevel = 0; } @@ -767,6 +785,7 @@ bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId) { Assert(AllowedDistributionColumnValue.isActive); + Assert(ExecutorLevel > AllowedDistributionColumnValue.executorLevel); ereport(DEBUG4, errmsg("Comparing saved:%s with Shard key: %s colocationid:%d:%d", pretty_format_node_dump( diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index bff5a4243..ae0f4ac67 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -1018,9 +1018,9 @@ ModifyQuerySupported(Query *queryTree, Query *originalQuery, bool multiShardQuer StringInfo errorHint = makeStringInfo(); CitusTableCacheEntry *cacheEntry = GetCitusTableCacheEntry( distributedTableId); - char *partitionKeyString = cacheEntry->partitionKeyString; - char *partitionColumnName = ColumnToColumnName(distributedTableId, - partitionKeyString); + char *partitionColumnName = + ColumnToColumnName(distributedTableId, + (Node *) cacheEntry->partitionColumn); appendStringInfo(errorHint, "Consider using an equality filter on " "partition column \"%s\" to target a single shard.", @@ -3058,8 +3058,8 @@ BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) if (prunedShardIntervalCount != 1) { char *partitionKeyString = cacheEntry->partitionKeyString; - char *partitionColumnName = 
ColumnToColumnName(distributedTableId, - partitionKeyString); + char *partitionColumnName = + ColumnToColumnName(distributedTableId, stringToNode(partitionKeyString)); StringInfo errorMessage = makeStringInfo(); StringInfo errorHint = makeStringInfo(); const char *targetCountType = NULL; diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 5dbc7d3e1..521d3e406 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -513,6 +513,9 @@ CitusCleanupConnectionsAtExit(int code, Datum arg) * are already given away. */ DeallocateReservedConnections(); + + /* we don't want any monitoring view/udf to show already exited backends */ + UnSetGlobalPID(); } diff --git a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql index c3ffbb1cb..849b28761 100644 --- a/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql +++ b/src/backend/distributed/sql/citus--10.2-4--11.0-1.sql @@ -17,7 +17,13 @@ #include "udfs/get_all_active_transactions/11.0-1.sql" #include "udfs/get_global_active_transactions/11.0-1.sql" +#include "udfs/citus_internal_local_blocked_processes/11.0-1.sql" +#include "udfs/citus_internal_global_blocked_processes/11.0-1.sql" + #include "udfs/citus_worker_stat_activity/11.0-1.sql" +#include "udfs/worker_create_or_replace_object/11.0-1.sql" +#include "udfs/citus_isolation_test_session_is_blocked/11.0-1.sql" +#include "udfs/citus_blocking_pids/11.0-1.sql" CREATE VIEW citus.citus_worker_stat_activity AS SELECT * FROM pg_catalog.citus_worker_stat_activity(); @@ -34,6 +40,9 @@ GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC; -- we have to recreate this view because recreated citus_dist_stat_activity that this view depends #include "udfs/citus_lock_waits/11.0-1.sql" +#include "udfs/pg_cancel_backend/11.0-1.sql" +#include "udfs/pg_terminate_backend/11.0-1.sql" + DROP FUNCTION IF EXISTS 
pg_catalog.master_apply_delete_command(text); DROP FUNCTION pg_catalog.master_get_table_metadata(text); DROP FUNCTION pg_catalog.master_append_table_to_shard(bigint, text, text, integer); diff --git a/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql b/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql index dccc66d16..359360981 100644 --- a/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql +++ b/src/backend/distributed/sql/citus--8.3-1--9.0-1.sql @@ -21,13 +21,7 @@ ALTER FUNCTION citus.restore_isolation_tester_func SET SCHEMA citus_internal; GRANT USAGE ON SCHEMA citus TO public; #include "udfs/pg_dist_shard_placement_trigger_func/9.0-1.sql" - -CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text) - RETURNS bool - LANGUAGE C STRICT - AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; -COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) - IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; +#include "udfs/worker_create_or_replace_object/9.0-1.sql" CREATE OR REPLACE FUNCTION pg_catalog.master_unmark_object_distributed(classid oid, objid oid, objsubid int) RETURNS void diff --git a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql index e94ed0bbf..ba13b134a 100644 --- a/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql +++ b/src/backend/distributed/sql/downgrades/citus--11.0-1--10.2-4.sql @@ -113,6 +113,9 @@ CREATE FUNCTION get_global_active_transactions(OUT datid oid, OUT process_id int RESET search_path; +DROP FUNCTION citus_internal_local_blocked_processes CASCADE; +DROP FUNCTION citus_internal_global_blocked_processes CASCADE; + DROP FUNCTION pg_catalog.citus_dist_stat_activity CASCADE; CREATE OR REPLACE FUNCTION pg_catalog.citus_dist_stat_activity(OUT query_hostname text, 
OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, @@ -139,6 +142,162 @@ ALTER VIEW citus.citus_dist_stat_activity SET SCHEMA pg_catalog; GRANT SELECT ON pg_catalog.citus_dist_stat_activity TO PUBLIC; SET search_path = 'pg_catalog'; +DROP FUNCTION citus_worker_stat_activity CASCADE; + +CREATE OR REPLACE FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +RETURNS SETOF RECORD +LANGUAGE C STRICT AS 'MODULE_PATHNAME', +$$citus_worker_stat_activity$$; + +COMMENT ON FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, + OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, + OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, + OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, + OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, + OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) +IS 'returns distributed transaction activity on shards of distributed tables'; + +DROP FUNCTION pg_catalog.worker_create_or_replace_object(text[]); +#include 
"../udfs/worker_create_or_replace_object/9.0-1.sql" + +DROP FUNCTION IF EXISTS pg_catalog.pg_cancel_backend(bigint) CASCADE; +DROP FUNCTION IF EXISTS pg_catalog.pg_terminate_backend(bigint, bigint) CASCADE; + +DROP FUNCTION pg_catalog.dump_local_wait_edges CASCADE; +CREATE FUNCTION pg_catalog.dump_local_wait_edges( + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$dump_local_wait_edges$$; +COMMENT ON FUNCTION pg_catalog.dump_local_wait_edges() +IS 'returns all local lock wait chains, that start from distributed transactions'; + +DROP FUNCTION pg_catalog.dump_global_wait_edges CASCADE; +CREATE FUNCTION pg_catalog.dump_global_wait_edges( + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE 'c' STRICT +AS $$MODULE_PATHNAME$$, $$dump_global_wait_edges$$; +COMMENT ON FUNCTION pg_catalog.dump_global_wait_edges() +IS 'returns a global list of blocked transactions originating from this node'; + +DROP FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]); +CREATE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) +RETURNS boolean AS $$ + DECLARE + mBlockedTransactionNum int8; + workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id'); + coordinatorProcessId integer := 
current_setting('citus.isolation_test_session_process_id'); + BEGIN + IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN + RETURN true; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + -- Note that worker process may be blocked or waiting for a lock. So we need to + -- get transaction number for both of them. Following IF provides the transaction + -- number when the worker process waiting for other session. + IF EXISTS (SELECT transaction_number FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN + SELECT transaction_number INTO mBlockedTransactionNum FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; + ELSE + -- Check whether transactions initiated from the coordinator get locked + SELECT transaction_number INTO mBlockedTransactionNum + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + END IF; + + RETURN EXISTS ( + SELECT 1 FROM dump_global_wait_edges() + WHERE waiting_transaction_num = mBlockedTransactionNum + ) OR EXISTS ( + -- Check on the workers if any logical replication job spawned by the + -- current PID is blocked, by checking it's application name + -- Query is heavily based on: https://wiki.postgresql.org/wiki/Lock_Monitoring + SELECT result FROM run_command_on_workers($two$ + SELECT blocked_activity.application_name AS blocked_application + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT 
FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%' + $two$) where result='citus_shard_move_subscription_' || pBlockedPid); + + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC; + +DROP FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer); +CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) +RETURNS int4[] AS $$ + DECLARE + mLocalBlockingPids int4[]; + mRemoteBlockingPids int4[]; + mLocalTransactionNum int8; + BEGIN + SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; + + IF (array_length(mLocalBlockingPids, 1) > 0) THEN + RETURN mLocalBlockingPids; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. 
+ SELECT transaction_number INTO mLocalTransactionNum + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + + SELECT array_agg(process_id) INTO mRemoteBlockingPids FROM ( + WITH activeTransactions AS ( + SELECT process_id, transaction_number FROM get_all_active_transactions() + ), blockingTransactions AS ( + SELECT blocking_transaction_num AS txn_num FROM dump_global_wait_edges() + WHERE waiting_transaction_num = mLocalTransactionNum + ) + SELECT activeTransactions.process_id FROM activeTransactions, blockingTransactions + WHERE activeTransactions.transaction_number = blockingTransactions.txn_num + ) AS sub; + + RETURN mRemoteBlockingPids; + END; +$$ LANGUAGE plpgsql; +REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC; + +CREATE VIEW citus.citus_worker_stat_activity AS +SELECT * FROM pg_catalog.citus_worker_stat_activity(); +ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC; -- we have to recreate this view because we drop citus_dist_stat_activity that this view depends CREATE VIEW citus.citus_lock_waits AS @@ -150,7 +309,7 @@ citus_dist_stat_activity AS ), unique_global_wait_edges AS ( - SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() + SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() ), citus_dist_stat_activity_with_node_id AS ( @@ -183,29 +342,4 @@ JOIN ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; -DROP FUNCTION citus_worker_stat_activity CASCADE; - -CREATE OR REPLACE FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, - OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT 
datname name, - OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, - OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, - OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, - OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) -RETURNS SETOF RECORD -LANGUAGE C STRICT AS 'MODULE_PATHNAME', -$$citus_worker_stat_activity$$; - -COMMENT ON FUNCTION citus_worker_stat_activity(OUT query_hostname text, OUT query_hostport int, OUT distributed_query_host_name text, OUT distributed_query_host_port int, - OUT transaction_number int8, OUT transaction_stamp timestamptz, OUT datid oid, OUT datname name, - OUT pid int, OUT usesysid oid, OUT usename name, OUT application_name text, OUT client_addr INET, - OUT client_hostname TEXT, OUT client_port int, OUT backend_start timestamptz, OUT xact_start timestamptz, - OUT query_start timestamptz, OUT state_change timestamptz, OUT wait_event_type text, OUT wait_event text, - OUT state text, OUT backend_xid xid, OUT backend_xmin xid, OUT query text, OUT backend_type text) -IS 'returns distributed transaction activity on shards of distributed tables'; - -CREATE VIEW citus.citus_worker_stat_activity AS -SELECT * FROM pg_catalog.citus_worker_stat_activity(); -ALTER VIEW citus.citus_worker_stat_activity SET SCHEMA pg_catalog; -GRANT SELECT ON pg_catalog.citus_worker_stat_activity TO PUBLIC; - RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/11.0-1.sql new file mode 100644 index 000000000..c7e607c1c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/11.0-1.sql @@ -0,0 +1,34 @@ +DROP FUNCTION pg_catalog.citus_blocking_pids; +CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) +RETURNS int4[] AS $$ + DECLARE + 
mLocalBlockingPids int4[]; + mRemoteBlockingPids int4[]; + mLocalGlobalPid int8; + BEGIN + SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; + + IF (array_length(mLocalBlockingPids, 1) > 0) THEN + RETURN mLocalBlockingPids; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + SELECT global_pid INTO mLocalGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + + SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM ( + WITH activeTransactions AS ( + SELECT global_pid FROM get_all_active_transactions() + ), blockingTransactions AS ( + SELECT blocking_global_pid FROM citus_internal_global_blocked_processes() + WHERE waiting_global_pid = mLocalGlobalPid + ) + SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions + WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid + ) AS sub; + + RETURN mRemoteBlockingPids; + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql new file mode 100644 index 000000000..c7e607c1c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql @@ -0,0 +1,34 @@ +DROP FUNCTION pg_catalog.citus_blocking_pids; +CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) +RETURNS int4[] AS $$ + DECLARE + mLocalBlockingPids int4[]; + mRemoteBlockingPids int4[]; + mLocalGlobalPid int8; + BEGIN + SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; + + IF (array_length(mLocalBlockingPids, 1) > 0) THEN + RETURN mLocalBlockingPids; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. 
+ SELECT global_pid INTO mLocalGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + + SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM ( + WITH activeTransactions AS ( + SELECT global_pid FROM get_all_active_transactions() + ), blockingTransactions AS ( + SELECT blocking_global_pid FROM citus_internal_global_blocked_processes() + WHERE waiting_global_pid = mLocalGlobalPid + ) + SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions + WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid + ) AS sub; + + RETURN mRemoteBlockingPids; + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/11.0-1.sql new file mode 100644 index 000000000..510cdf93d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/11.0-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql new file mode 100644 
index 000000000..510cdf93d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/11.0-1.sql new file mode 100644 index 000000000..3157a9aad --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/11.0-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; diff --git 
a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql new file mode 100644 index 000000000..3157a9aad --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/11.0-1.sql new file mode 100644 index 000000000..64b89ec0e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/11.0-1.sql @@ -0,0 +1,56 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) +RETURNS boolean AS $$ + DECLARE + mBlockedGlobalPid int8; + workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id'); + coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id'); + BEGIN + IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN + RETURN true; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. 
+ -- Note that worker process may be blocked or waiting for a lock. So we need to + -- get transaction number for both of them. Following IF provides the transaction + -- number when the worker process waiting for other session. + IF EXISTS (SELECT 1 FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN + SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; + ELSE + -- Check whether transactions initiated from the coordinator get locked + SELECT global_pid INTO mBlockedGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + END IF; + + RETURN EXISTS ( + SELECT 1 FROM citus_internal_global_blocked_processes() + WHERE waiting_global_pid = mBlockedGlobalPid + ) OR EXISTS ( + -- Check on the workers if any logical replication job spawned by the + -- current PID is blocked, by checking it's application name + -- Query is heavily based on: https://wiki.postgresql.org/wiki/Lock_Monitoring + SELECT result FROM run_command_on_workers($two$ + SELECT blocked_activity.application_name AS blocked_application + FROM pg_catalog.pg_locks blocked_locks + JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid + JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT 
FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid + JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid + WHERE NOT blocked_locks.GRANTED AND blocked_activity.application_name LIKE 'citus_shard_move_subscription_%' + $two$) where result='citus_shard_move_subscription_' || pBlockedPid); + + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql index 0b91cc37c..64b89ec0e 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql @@ -1,7 +1,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) RETURNS boolean AS $$ DECLARE - mBlockedTransactionNum int8; + mBlockedGlobalPid int8; workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id'); coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id'); BEGIN @@ -13,19 +13,19 @@ RETURNS boolean AS $$ -- Note that worker process may be blocked or waiting for a lock. So we need to -- get transaction number for both of them. Following IF provides the transaction -- number when the worker process waiting for other session. 
- IF EXISTS (SELECT transaction_number FROM get_global_active_transactions() + IF EXISTS (SELECT 1 FROM get_global_active_transactions() WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN - SELECT transaction_number INTO mBlockedTransactionNum FROM get_global_active_transactions() + SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions() WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; ELSE -- Check whether transactions initiated from the coordinator get locked - SELECT transaction_number INTO mBlockedTransactionNum + SELECT global_pid INTO mBlockedGlobalPid FROM get_all_active_transactions() WHERE process_id = pBlockedPid; END IF; RETURN EXISTS ( - SELECT 1 FROM dump_global_wait_edges() - WHERE waiting_transaction_num = mBlockedTransactionNum + SELECT 1 FROM citus_internal_global_blocked_processes() + WHERE waiting_global_pid = mBlockedGlobalPid ) OR EXISTS ( -- Check on the workers if any logical replication job spawned by the -- current PID is blocked, by checking it's application name diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql index 2ae40374a..779341657 100644 --- a/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql @@ -8,7 +8,7 @@ citus_dist_stat_activity AS ), unique_global_wait_edges AS ( - SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() + SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM citus_internal_global_blocked_processes() ), citus_dist_stat_activity_with_node_id AS ( @@ -21,6 +21,8 @@ citus_dist_stat_activity_with_node_id AS citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport ) SELECT + waiting.global_pid as waiting_gpid, + blocking.global_pid as blocking_gpid, waiting.pid AS 
waiting_pid, blocking.pid AS blocking_pid, waiting.query AS blocked_statement, @@ -34,9 +36,9 @@ SELECT FROM unique_global_wait_edges JOIN - citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) + citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid) JOIN - citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid); ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql index 2ae40374a..779341657 100644 --- a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql @@ -8,7 +8,7 @@ citus_dist_stat_activity AS ), unique_global_wait_edges AS ( - SELECT DISTINCT ON(waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num) * FROM dump_global_wait_edges() + SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM citus_internal_global_blocked_processes() ), citus_dist_stat_activity_with_node_id AS ( @@ -21,6 +21,8 @@ citus_dist_stat_activity_with_node_id AS citus_dist_stat_activity.distributed_query_host_port = pg_dist_node.nodeport ) SELECT + waiting.global_pid as waiting_gpid, + blocking.global_pid as blocking_gpid, waiting.pid AS waiting_pid, blocking.pid AS blocking_pid, waiting.query AS blocked_statement, @@ -34,9 +36,9 @@ SELECT FROM unique_global_wait_edges JOIN - 
citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_transaction_num = waiting.transaction_number AND unique_global_wait_edges.waiting_node_id = waiting.initiator_node_id) + citus_dist_stat_activity_with_node_id waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid) JOIN - citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_transaction_num = blocking.transaction_number AND unique_global_wait_edges.blocking_node_id = blocking.initiator_node_id); + citus_dist_stat_activity_with_node_id blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid); ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; diff --git a/src/backend/distributed/sql/udfs/pg_cancel_backend/11.0-1.sql b/src/backend/distributed/sql/udfs/pg_cancel_backend/11.0-1.sql new file mode 100644 index 000000000..3a355aa6d --- /dev/null +++ b/src/backend/distributed/sql/udfs/pg_cancel_backend/11.0-1.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_cancel_backend(global_pid bigint) CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_cancel_backend(global_pid bigint) + RETURNS BOOL + LANGUAGE C +AS 'MODULE_PATHNAME', $$pg_cancel_backend$$; + +COMMENT ON FUNCTION pg_catalog.pg_cancel_backend(global_pid bigint) + IS 'cancels a Citus query which might be on any node in the Citus cluster'; diff --git a/src/backend/distributed/sql/udfs/pg_cancel_backend/latest.sql b/src/backend/distributed/sql/udfs/pg_cancel_backend/latest.sql new file mode 100644 index 000000000..3a355aa6d --- /dev/null +++ b/src/backend/distributed/sql/udfs/pg_cancel_backend/latest.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_cancel_backend(global_pid bigint) CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_cancel_backend(global_pid bigint) + RETURNS BOOL + LANGUAGE C +AS 'MODULE_PATHNAME', $$pg_cancel_backend$$; + +COMMENT ON FUNCTION 
pg_catalog.pg_cancel_backend(global_pid bigint) + IS 'cancels a Citus query which might be on any node in the Citus cluster'; diff --git a/src/backend/distributed/sql/udfs/pg_terminate_backend/11.0-1.sql b/src/backend/distributed/sql/udfs/pg_terminate_backend/11.0-1.sql new file mode 100644 index 000000000..9b9798b76 --- /dev/null +++ b/src/backend/distributed/sql/udfs/pg_terminate_backend/11.0-1.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint) CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint DEFAULT 0) + RETURNS BOOL + LANGUAGE C +AS 'MODULE_PATHNAME', $$pg_terminate_backend$$; + +COMMENT ON FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint) + IS 'terminates a Citus query which might be on any node in the Citus cluster'; diff --git a/src/backend/distributed/sql/udfs/pg_terminate_backend/latest.sql b/src/backend/distributed/sql/udfs/pg_terminate_backend/latest.sql new file mode 100644 index 000000000..9b9798b76 --- /dev/null +++ b/src/backend/distributed/sql/udfs/pg_terminate_backend/latest.sql @@ -0,0 +1,9 @@ +DROP FUNCTION IF EXISTS pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint) CASCADE; + +CREATE OR REPLACE FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint DEFAULT 0) + RETURNS BOOL + LANGUAGE C +AS 'MODULE_PATHNAME', $$pg_terminate_backend$$; + +COMMENT ON FUNCTION pg_catalog.pg_terminate_backend(global_pid bigint, timeout bigint) + IS 'terminates a Citus query which might be on any node in the Citus cluster'; diff --git a/src/backend/distributed/sql/udfs/worker_create_or_replace_object/11.0-1.sql b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/11.0-1.sql new file mode 100644 index 000000000..d9e21a9b2 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/11.0-1.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE FUNCTION 
pg_catalog.worker_create_or_replace_object(statement text) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; + +CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + IS 'takes a list of sql statements, before executing these it will check if the object already exists in that exact state otherwise replaces that named object with the new object'; diff --git a/src/backend/distributed/sql/udfs/worker_create_or_replace_object/9.0-1.sql b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/9.0-1.sql new file mode 100644 index 000000000..d4ab612f0 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/9.0-1.sql @@ -0,0 +1,6 @@ +CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; diff --git a/src/backend/distributed/sql/udfs/worker_create_or_replace_object/latest.sql b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/latest.sql new file mode 100644 index 000000000..d9e21a9b2 --- /dev/null +++ b/src/backend/distributed/sql/udfs/worker_create_or_replace_object/latest.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE
FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statement text) + IS 'takes a sql CREATE statement, before executing the create it will check if an object with that name already exists and safely replaces that named object with the new object'; + +CREATE OR REPLACE FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + RETURNS bool + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$worker_create_or_replace_object_array$$; + +COMMENT ON FUNCTION pg_catalog.worker_create_or_replace_object(statements text[]) + IS 'takes a list of sql statements, before executing these it will check if the object already exists in that exact state otherwise replaces that named object with the new object'; diff --git a/src/backend/distributed/test/dependency.c b/src/backend/distributed/test/dependency.c index 79ad1c139..82e818b8c 100644 --- a/src/backend/distributed/test/dependency.c +++ b/src/backend/distributed/test/dependency.c @@ -47,7 +47,7 @@ citus_get_all_dependencies_for_object(PG_FUNCTION_ARGS) ObjectAddress address = { 0 }; ObjectAddressSubSet(address, classid, objid, objsubid); - List *dependencies = GetAllDependenciesForObject(&address); + List *dependencies = GetAllSupportedDependenciesForObject(&address); ObjectAddress *dependency = NULL; foreach_ptr(dependency, dependencies) { diff --git a/src/backend/distributed/test/distributed_deadlock_detection.c b/src/backend/distributed/test/distributed_deadlock_detection.c index 448228158..d3fa34db2 100644 --- a/src/backend/distributed/test/distributed_deadlock_detection.c +++ b/src/backend/distributed/test/distributed_deadlock_detection.c @@ -50,7 +50,10 @@ get_adjacency_list_wait_graph(PG_FUNCTION_ARGS) bool isNulls[2]; Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDescriptor); - WaitGraph *waitGraph =
BuildGlobalWaitGraph(); + + /* distributed deadlock detection only considers distributed txs */ + bool onlyDistributedTx = true; + WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx); HTAB *adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph); /* iterate on all nodes */ diff --git a/src/backend/distributed/test/run_from_same_connection.c b/src/backend/distributed/test/run_from_same_connection.c index 3b5f804b4..ad39664bf 100644 --- a/src/backend/distributed/test/run_from_same_connection.c +++ b/src/backend/distributed/test/run_from_same_connection.c @@ -58,6 +58,7 @@ static int64 GetRemoteProcessId(void); PG_FUNCTION_INFO_V1(start_session_level_connection_to_node); PG_FUNCTION_INFO_V1(run_commands_on_session_level_connection_to_node); PG_FUNCTION_INFO_V1(stop_session_level_connection_to_node); +PG_FUNCTION_INFO_V1(override_backend_data_command_originator); /* @@ -119,6 +120,17 @@ start_session_level_connection_to_node(PG_FUNCTION_ARGS) ExecuteCriticalRemoteCommand(singleConnection, setAppName); + /* + * We are hackily overriding the remote processes' worker_query to be false + * such that relevant observability UDFs work fine. + */ + StringInfo overrideBackendDataCommandOriginator = makeStringInfo(); + appendStringInfo(overrideBackendDataCommandOriginator, + "SELECT override_backend_data_command_originator(true);"); + ExecuteCriticalRemoteCommand(singleConnection, + overrideBackendDataCommandOriginator->data); + + PG_RETURN_VOID(); } @@ -174,6 +186,23 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS) } +/* + * override_backend_data_command_originator is a wrapper around + * OverrideBackendDataDistributedCommandOriginator(). 
+ */ +Datum +override_backend_data_command_originator(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + bool distributedCommandOriginator = PG_GETARG_BOOL(0); + + OverrideBackendDataDistributedCommandOriginator(distributedCommandOriginator); + + PG_RETURN_VOID(); +} + + /* * stop_session_level_connection_to_node closes the connection opened by the * start_session_level_connection_to_node and set the flag to false which diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 4e037e30c..5ca517199 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -33,6 +33,7 @@ #include "distributed/shared_connection_stats.h" #include "distributed/transaction_identifier.h" #include "distributed/tuplestore.h" +#include "distributed/worker_manager.h" #include "nodes/execnodes.h" #include "postmaster/autovacuum.h" /* to access autovacuum_max_workers */ #include "replication/walsender.h" @@ -47,6 +48,7 @@ #define GET_ACTIVE_TRANSACTION_QUERY "SELECT * FROM get_all_active_transactions();" #define ACTIVE_TRANSACTION_COLUMN_COUNT 7 +#define GLOBAL_PID_NODE_ID_MULTIPLIER 10000000000 /* * Each backend's data reside in the shared memory @@ -90,7 +92,6 @@ static BackendData *MyBackendData = NULL; static void BackendManagementShmemInit(void); static size_t BackendManagementShmemSize(void); -static void UnSetGlobalPID(void); PG_FUNCTION_INFO_V1(assign_distributed_transaction_id); @@ -153,7 +154,6 @@ assign_distributed_transaction_id(PG_FUNCTION_ARGS) MyBackendData->citusBackend.initiatorNodeIdentifier = MyBackendData->transactionId.initiatorNodeIdentifier; - MyBackendData->citusBackend.transactionOriginator = false; SpinLockRelease(&MyBackendData->mutex); @@ -411,15 +411,12 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto initiatorNodeIdentifier = currentBackend->citusBackend.initiatorNodeIdentifier; /* - * We prefer to 
use worker_query instead of transactionOriginator in the user facing - * functions since its more intuitive. Thus, we negate the result before returning. - * - * We prefer to use citusBackend's transactionOriginator field over transactionId's - * field with the same name. The reason is that it also covers backends that are not - * inside a distributed transaction. + * We prefer to use worker_query instead of distributedCommandOriginator in + * the user facing functions since its more intuitive. Thus, + * we negate the result before returning. */ - bool coordinatorOriginatedQuery = - currentBackend->citusBackend.transactionOriginator; + bool distributedCommandOriginator = + currentBackend->distributedCommandOriginator; transactionNumber = currentBackend->transactionId.transactionNumber; TimestampTz transactionIdTimestamp = currentBackend->transactionId.timestamp; @@ -429,7 +426,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto values[0] = ObjectIdGetDatum(databaseId); values[1] = Int32GetDatum(backendPid); values[2] = Int32GetDatum(initiatorNodeIdentifier); - values[3] = !coordinatorOriginatedQuery; + values[3] = !distributedCommandOriginator; values[4] = UInt64GetDatum(transactionNumber); values[5] = TimestampTzGetDatum(transactionIdTimestamp); values[6] = UInt64GetDatum(currentBackend->globalPID); @@ -664,7 +661,6 @@ UnSetDistributedTransactionId(void) MyBackendData->transactionId.timestamp = 0; MyBackendData->citusBackend.initiatorNodeIdentifier = -1; - MyBackendData->citusBackend.transactionOriginator = false; SpinLockRelease(&MyBackendData->mutex); } @@ -674,7 +670,7 @@ UnSetDistributedTransactionId(void) /* * UnSetGlobalPID resets the global pid for the current backend. 
*/ -static void +void UnSetGlobalPID(void) { /* backend does not exist if the extension is not created */ @@ -777,7 +773,6 @@ AssignDistributedTransactionId(void) MyBackendData->transactionId.timestamp = currentTimestamp; MyBackendData->citusBackend.initiatorNodeIdentifier = localGroupId; - MyBackendData->citusBackend.transactionOriginator = true; SpinLockRelease(&MyBackendData->mutex); } @@ -799,7 +794,6 @@ MarkCitusInitiatedCoordinatorBackend(void) SpinLockAcquire(&MyBackendData->mutex); MyBackendData->citusBackend.initiatorNodeIdentifier = localGroupId; - MyBackendData->citusBackend.transactionOriginator = true; SpinLockRelease(&MyBackendData->mutex); } @@ -815,10 +809,12 @@ void AssignGlobalPID(void) { uint64 globalPID = INVALID_CITUS_INTERNAL_BACKEND_GPID; + bool distributedCommandOriginator = false; if (!IsCitusInternalBackend()) { globalPID = GenerateGlobalPID(); + distributedCommandOriginator = true; } else { @@ -827,6 +823,21 @@ AssignGlobalPID(void) SpinLockAcquire(&MyBackendData->mutex); MyBackendData->globalPID = globalPID; + MyBackendData->distributedCommandOriginator = distributedCommandOriginator; + SpinLockRelease(&MyBackendData->mutex); +} + + +/* + * OverrideBackendDataDistributedCommandOriginator should only be used for isolation testing. + * See how it is used in the relevant functions. + */ +void +OverrideBackendDataDistributedCommandOriginator(bool distributedCommandOriginator) +{ + SpinLockAcquire(&MyBackendData->mutex); + MyBackendData->distributedCommandOriginator = + distributedCommandOriginator; SpinLockRelease(&MyBackendData->mutex); } @@ -865,7 +876,7 @@ GenerateGlobalPID(void) * node ids might cause overflow. But even for the applications that scale around 50 nodes every * day it'd take about 100K years. So we are not worried. 
*/ - return (((uint64) GetLocalNodeId()) * 10000000000) + getpid(); + return (((uint64) GetLocalNodeId()) * GLOBAL_PID_NODE_ID_MULTIPLIER) + getpid(); } @@ -908,6 +919,42 @@ ExtractGlobalPID(char *applicationName) } +/* + * ExtractNodeIdFromGlobalPID extracts the node id from the global pid. + * Global pid is constructed by multiplying node id with GLOBAL_PID_NODE_ID_MULTIPLIER + * and adding process id. So integer division of global pid by GLOBAL_PID_NODE_ID_MULTIPLIER + * gives us the node id. + */ +int +ExtractNodeIdFromGlobalPID(uint64 globalPID) +{ + int nodeId = (int) (globalPID / GLOBAL_PID_NODE_ID_MULTIPLIER); + + if (nodeId == GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA) + { + ereport(ERROR, (errmsg("originator node of the query with the global pid " + "%lu is not in Citus' metadata", globalPID), + errhint("connect to the node directly run pg_cancel_backend(pid) " + "or pg_terminate_backend(pid)"))); + } + + return nodeId; +} + + +/* + * ExtractProcessIdFromGlobalPID extracts the process id from the global pid. + * Global pid is constructed by multiplying node id with GLOBAL_PID_NODE_ID_MULTIPLIER + * and adding process id. So global pid mod GLOBAL_PID_NODE_ID_MULTIPLIER gives us the + * process id. + */ +int +ExtractProcessIdFromGlobalPID(uint64 globalPID) +{ + return (int) (globalPID % GLOBAL_PID_NODE_ID_MULTIPLIER); +} + + /* * CurrentDistributedTransactionNumber returns the transaction number of the * current distributed transaction. 
The caller must make sure a distributed diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c index 3fb15295f..ec4f6e8a3 100644 --- a/src/backend/distributed/transaction/citus_dist_stat_activity.c +++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c @@ -157,10 +157,9 @@ FROM \ WHERE \ backend_type = 'client backend' \ AND \ - pg_stat_activity.query NOT ILIKE '%stat_activity%' \ + worker_query = False \ AND \ - pg_stat_activity.application_name NOT SIMILAR TO 'citus_internal gpid=\\d+'; \ -" + pg_stat_activity.query NOT ILIKE '%stat_activity%';" #define CITUS_WORKER_STAT_ACTIVITY_QUERY \ "\ @@ -195,7 +194,7 @@ FROM \ get_all_active_transactions() AS dist_txs(database_id, process_id, initiator_node_identifier, worker_query, transaction_number, transaction_stamp, global_id) \ ON pg_stat_activity.pid = dist_txs.process_id \ WHERE \ - pg_stat_activity.application_name SIMILAR TO 'citus_internal gpid=\\d+' \ + worker_query = True \ AND \ pg_stat_activity.query NOT ILIKE '%stat_activity%';" diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c index f9e4adca0..82c274661 100644 --- a/src/backend/distributed/transaction/distributed_deadlock_detection.c +++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c @@ -119,7 +119,9 @@ CheckForDistributedDeadlocks(void) return false; } - WaitGraph *waitGraph = BuildGlobalWaitGraph(); + /* distributed deadlock detection only considers distributed txs */ + bool onlyDistributedTx = true; + WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx); HTAB *adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph); int edgeCount = waitGraph->edgeCount; diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index aa37e4371..62b5e4e04 100644 --- 
a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -47,7 +47,10 @@ typedef struct PROCStack static void AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex); static void ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo); -static WaitGraph * BuildLocalWaitGraph(void); +static void AddWaitEdgeFromBlockedProcessResult(WaitGraph *waitGraph, PGresult *result, + int rowIndex); +static void ReturnBlockedProcessGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo); +static WaitGraph * BuildLocalWaitGraph(bool onlyDistributedTx); static bool IsProcessWaitingForSafeOperations(PGPROC *proc); static void LockLockData(void); static void UnlockLockData(void); @@ -62,10 +65,30 @@ static void AddProcToVisit(PROCStack *remaining, PGPROC *proc); static bool IsSameLockGroup(PGPROC *leftProc, PGPROC *rightProc); static bool IsConflictingLockMask(int holdMask, int conflictMask); - +/* + * We almost have 2 sets of identical functions. The first set (e.g., dump_wait_edges) + * functions are intended for distributed deadlock detection purposes. + * + * The second set of functions (e.g., citus_internal_local_blocked_processes) are + * intended for citus_lock_waits view. + * + * The main difference is that the former functions only show processes that are blocked + * inside a distributed transaction (e.g., see AssignDistributedTransactionId()). + * The latter functions return a superset, where any blocked process is returned. + * + * We kept two different set of functions for two purposes. First, the deadlock detection + * is a performance critical code-path happening very frequently and we don't add any + * performance overhead. Secondly, to be able to do rolling upgrades, we cannot change + * the API of dump_global_wait_edges/dump_local_wait_edges such that they take a boolean + * parameter. 
If we do that, until all nodes are upgraded, the deadlock detection would fail, + * which is not acceptable. + */ PG_FUNCTION_INFO_V1(dump_local_wait_edges); PG_FUNCTION_INFO_V1(dump_global_wait_edges); +PG_FUNCTION_INFO_V1(citus_internal_local_blocked_processes); +PG_FUNCTION_INFO_V1(citus_internal_global_blocked_processes); + /* * dump_global_wait_edges returns global wait edges for distributed transactions @@ -74,7 +97,9 @@ PG_FUNCTION_INFO_V1(dump_global_wait_edges); Datum dump_global_wait_edges(PG_FUNCTION_ARGS) { - WaitGraph *waitGraph = BuildGlobalWaitGraph(); + bool onlyDistributedTx = true; + + WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx); ReturnWaitGraph(waitGraph, fcinfo); @@ -82,20 +107,44 @@ dump_global_wait_edges(PG_FUNCTION_ARGS) } +/* + * citus_internal_global_blocked_processes returns global wait edges + * including all processes running on the cluster. + */ +Datum +citus_internal_global_blocked_processes(PG_FUNCTION_ARGS) +{ + bool onlyDistributedTx = false; + + WaitGraph *waitGraph = BuildGlobalWaitGraph(onlyDistributedTx); + + ReturnBlockedProcessGraph(waitGraph, fcinfo); + + return (Datum) 0; +} + + /* * BuildGlobalWaitGraph builds a wait graph for distributed transactions * that originate from this node, including edges from all (other) worker * nodes. + * + * + * If onlyDistributedTx is true, we only return distributed transactions + * (e.g., AssignDistributedTransaction() or assign_distributed_transactions()) + * has been called for the process. Distributed deadlock detection only + * interested in these processes. 
*/ WaitGraph * -BuildGlobalWaitGraph(void) +BuildGlobalWaitGraph(bool onlyDistributedTx) { List *workerNodeList = ActiveReadableNodeList(); char *nodeUser = CitusExtensionOwnerName(); List *connectionList = NIL; int32 localGroupId = GetLocalGroupId(); - WaitGraph *waitGraph = BuildLocalWaitGraph(); + /* deadlock detection is only interested in distributed transactions */ + WaitGraph *waitGraph = BuildLocalWaitGraph(onlyDistributedTx); /* open connections in parallel */ WorkerNode *workerNode = NULL; @@ -124,9 +173,28 @@ BuildGlobalWaitGraph(void) MultiConnection *connection = NULL; foreach_ptr(connection, connectionList) { - const char *command = "SELECT * FROM dump_local_wait_edges()"; + StringInfo queryString = makeStringInfo(); - int querySent = SendRemoteCommand(connection, command); + if (onlyDistributedTx) + { + appendStringInfo(queryString, + "SELECT waiting_pid, waiting_node_id, " + "waiting_transaction_num, waiting_transaction_stamp, " + "blocking_pid, blocking_node_id, blocking_transaction_num, " + "blocking_transaction_stamp, blocking_transaction_waiting " + "FROM dump_local_wait_edges()"); + } + else + { + appendStringInfo(queryString, + "SELECT waiting_global_pid, waiting_pid, " + "waiting_node_id, waiting_transaction_num, waiting_transaction_stamp, " + "blocking_global_pid,blocking_pid, blocking_node_id, " + "blocking_transaction_num, blocking_transaction_stamp, blocking_transaction_waiting " + "FROM citus_internal_local_blocked_processes()"); + } + + int querySent = SendRemoteCommand(connection, queryString->data); if (querySent == 0) { ReportConnectionError(connection, WARNING); @@ -148,16 +216,29 @@ BuildGlobalWaitGraph(void) int64 rowCount = PQntuples(result); int64 colCount = PQnfields(result); - if (colCount != 9) + if (onlyDistributedTx && colCount != 9) { ereport(WARNING, (errmsg("unexpected number of columns from " "dump_local_wait_edges"))); continue; } + else if (!onlyDistributedTx && colCount != 11) + { + ereport(WARNING, 
(errmsg("unexpected number of columns from " + "citus_internal_local_blocked_processes"))); + continue; + } for (int64 rowIndex = 0; rowIndex < rowCount; rowIndex++) { - AddWaitEdgeFromResult(waitGraph, result, rowIndex); + if (onlyDistributedTx) + { + AddWaitEdgeFromResult(waitGraph, result, rowIndex); + } + else + { + AddWaitEdgeFromBlockedProcessResult(waitGraph, result, rowIndex); + } } PQclear(result); @@ -177,10 +258,12 @@ AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex) { WaitEdge *waitEdge = AllocWaitEdge(waitGraph); + waitEdge->waitingGPid = 0; /* not requested for deadlock detection */ waitEdge->waitingPid = ParseIntField(result, rowIndex, 0); waitEdge->waitingNodeId = ParseIntField(result, rowIndex, 1); waitEdge->waitingTransactionNum = ParseIntField(result, rowIndex, 2); waitEdge->waitingTransactionStamp = ParseTimestampTzField(result, rowIndex, 3); + waitEdge->blockingGPid = 0; /* not requested for deadlock detection */ waitEdge->blockingPid = ParseIntField(result, rowIndex, 4); waitEdge->blockingNodeId = ParseIntField(result, rowIndex, 5); waitEdge->blockingTransactionNum = ParseIntField(result, rowIndex, 6); @@ -189,6 +272,29 @@ AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex) } +/* + * AddWaitEdgeFromBlockedProcessResult adds an edge to the wait graph that + * is read from a PGresult. 
+ */ +static void +AddWaitEdgeFromBlockedProcessResult(WaitGraph *waitGraph, PGresult *result, int rowIndex) +{ + WaitEdge *waitEdge = AllocWaitEdge(waitGraph); + + waitEdge->waitingGPid = ParseIntField(result, rowIndex, 0); + waitEdge->waitingPid = ParseIntField(result, rowIndex, 1); + waitEdge->waitingNodeId = ParseIntField(result, rowIndex, 2); + waitEdge->waitingTransactionNum = ParseIntField(result, rowIndex, 3); + waitEdge->waitingTransactionStamp = ParseTimestampTzField(result, rowIndex, 4); + waitEdge->blockingGPid = ParseIntField(result, rowIndex, 5); + waitEdge->blockingPid = ParseIntField(result, rowIndex, 6); + waitEdge->blockingNodeId = ParseIntField(result, rowIndex, 7); + waitEdge->blockingTransactionNum = ParseIntField(result, rowIndex, 8); + waitEdge->blockingTransactionStamp = ParseTimestampTzField(result, rowIndex, 9); + waitEdge->isBlockingXactWaiting = ParseBoolField(result, rowIndex, 10); +} + + /* * ParseIntField parses a int64 from a remote result or returns 0 if the * result is NULL. @@ -256,13 +362,31 @@ ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex) Datum dump_local_wait_edges(PG_FUNCTION_ARGS) { - WaitGraph *waitGraph = BuildLocalWaitGraph(); + bool onlyDistributedTx = true; + + WaitGraph *waitGraph = BuildLocalWaitGraph(onlyDistributedTx); ReturnWaitGraph(waitGraph, fcinfo); return (Datum) 0; } +/* + * citus_internal_local_blocked_processes returns global wait edges + * including all processes running on the node. + */ +Datum +citus_internal_local_blocked_processes(PG_FUNCTION_ARGS) +{ + bool onlyDistributedTx = false; + + WaitGraph *waitGraph = BuildLocalWaitGraph(onlyDistributedTx); + ReturnBlockedProcessGraph(waitGraph, fcinfo); + + return (Datum) 0; +} + + /* * ReturnWaitGraph returns a wait graph for a set returning function. */ @@ -325,12 +449,83 @@ ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo) } +/* + * ReturnBlockedProcessGraph returns a wait graph for a set returning function. 
+ */ +static void +ReturnBlockedProcessGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo) +{ + TupleDesc tupleDesc; + Tuplestorestate *tupleStore = SetupTuplestore(fcinfo, &tupleDesc); + + /* + * Columns: + * 00: waiting_global_pid + * 01: waiting_pid + * 02: waiting_node_id + * 03: waiting_transaction_num + * 04: waiting_transaction_stamp + * 05: blocking_global_pid + * 06: blocking_pid + * 07: blocking__node_id + * 08: blocking_transaction_num + * 09: blocking_transaction_stamp + * 10: blocking_transaction_waiting + */ + for (size_t curEdgeNum = 0; curEdgeNum < waitGraph->edgeCount; curEdgeNum++) + { + Datum values[11]; + bool nulls[11]; + WaitEdge *curEdge = &waitGraph->edges[curEdgeNum]; + + memset(values, 0, sizeof(values)); + memset(nulls, 0, sizeof(nulls)); + + values[0] = UInt64GetDatum(curEdge->waitingGPid); + values[1] = Int32GetDatum(curEdge->waitingPid); + values[2] = Int32GetDatum(curEdge->waitingNodeId); + if (curEdge->waitingTransactionNum != 0) + { + values[3] = Int64GetDatum(curEdge->waitingTransactionNum); + values[4] = TimestampTzGetDatum(curEdge->waitingTransactionStamp); + } + else + { + nulls[3] = true; + nulls[4] = true; + } + + values[5] = UInt64GetDatum(curEdge->blockingGPid); + values[6] = Int32GetDatum(curEdge->blockingPid); + values[7] = Int32GetDatum(curEdge->blockingNodeId); + if (curEdge->blockingTransactionNum != 0) + { + values[8] = Int64GetDatum(curEdge->blockingTransactionNum); + values[9] = TimestampTzGetDatum(curEdge->blockingTransactionStamp); + } + else + { + nulls[8] = true; + nulls[9] = true; + } + values[10] = BoolGetDatum(curEdge->isBlockingXactWaiting); + + tuplestore_putvalues(tupleStore, tupleDesc, values, nulls); + } +} + + /* * BuildLocalWaitGraph builds a wait graph for distributed transactions * that originate from the local node. 
+ * + * If onlyDistributedTx is true, we only return distributed transactions + * (e.g., AssignDistributedTransaction() or assign_distributed_transactions()) + * has been called for the process. Distributed deadlock detection only + * interested in these processes. */ static WaitGraph * -BuildLocalWaitGraph(void) +BuildLocalWaitGraph(bool onlyDistributedTx) { PROCStack remaining; int totalProcs = TotalProcCount(); @@ -379,7 +574,8 @@ BuildLocalWaitGraph(void) * care about distributed transactions for the purpose of distributed * deadlock detection. */ - if (!IsInDistributedTransaction(&currentBackendData)) + if (onlyDistributedTx && + !IsInDistributedTransaction(&currentBackendData)) { continue; } @@ -627,6 +823,7 @@ AddWaitEdge(WaitGraph *waitGraph, PGPROC *waitingProc, PGPROC *blockingProc, } curEdge->waitingPid = waitingProc->pid; + curEdge->waitingGPid = waitingBackendData.globalPID; if (IsInDistributedTransaction(&waitingBackendData)) { @@ -645,6 +842,7 @@ AddWaitEdge(WaitGraph *waitGraph, PGPROC *waitingProc, PGPROC *blockingProc, } curEdge->blockingPid = blockingProc->pid; + curEdge->blockingGPid = blockingBackendData.globalPID; if (IsInDistributedTransaction(&blockingBackendData)) { diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 4fe97e421..78e14367a 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -557,7 +557,8 @@ ResetGlobalVariables() MetadataSyncOnCommit = false; InTopLevelDelegatedFunctionCall = false; ResetWorkerErrorIndication(); - AllowedDistributionColumnValue.isActive = false; + memset(&AllowedDistributionColumnValue, 0, + sizeof(AllowedDistributionColumn)); } diff --git a/src/backend/distributed/utils/distribution_column.c b/src/backend/distributed/utils/distribution_column.c index a4dd647bd..5b59bd529 100644 --- 
b/src/backend/distributed/utils/distribution_column.c @@ -56,15 +56,12 @@ column_name_to_column(PG_FUNCTION_ARGS) text *columnText = PG_GETARG_TEXT_P(1); char *columnName = text_to_cstring(columnText); - Relation relation = relation_open(relationId, AccessShareLock); - - Var *column = BuildDistributionKeyFromColumnName(relation, columnName); + Var *column = BuildDistributionKeyFromColumnName(relationId, columnName, + AccessShareLock); Assert(column != NULL); char *columnNodeString = nodeToString(column); text *columnNodeText = cstring_to_text(columnNodeString); - relation_close(relation, AccessShareLock); - PG_RETURN_TEXT_P(columnNodeText); } @@ -81,13 +78,10 @@ column_name_to_column_id(PG_FUNCTION_ARGS) Oid distributedTableId = PG_GETARG_OID(0); char *columnName = PG_GETARG_CSTRING(1); - Relation relation = relation_open(distributedTableId, AccessExclusiveLock); - - Var *column = BuildDistributionKeyFromColumnName(relation, columnName); + Var *column = BuildDistributionKeyFromColumnName(distributedTableId, columnName, + AccessExclusiveLock); Assert(column != NULL); - relation_close(relation, NoLock); - PG_RETURN_INT16((int16) column->varattno); } @@ -107,8 +101,9 @@ column_to_column_name(PG_FUNCTION_ARGS) text *columnNodeText = PG_GETARG_TEXT_P(1); char *columnNodeString = text_to_cstring(columnNodeText); + Node *columnNode = stringToNode(columnNodeString); - char *columnName = ColumnToColumnName(relationId, columnNodeString); + char *columnName = ColumnToColumnName(relationId, columnNode); text *columnText = cstring_to_text(columnName); @@ -116,53 +111,6 @@ column_to_column_name(PG_FUNCTION_ARGS) } -/* - * FindColumnWithNameOnTargetRelation gets a source table and - * column name. The function returns the the column with the - * same name on the target table. - * - * Note that due to dropping columns, the parent's distribution key may not - * match the partition's distribution key. See issue #5123. 
- * - * The function throws error if the input or output is not valid or does - * not exist. - */ -Var * -FindColumnWithNameOnTargetRelation(Oid sourceRelationId, char *sourceColumnName, - Oid targetRelationId) -{ - if (sourceColumnName == NULL || sourceColumnName[0] == '\0') - { - ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("cannot find the given column on table \"%s\"", - generate_qualified_relation_name(sourceRelationId)))); - } - - AttrNumber attributeNumberOnTarget = get_attnum(targetRelationId, sourceColumnName); - if (attributeNumberOnTarget == InvalidAttrNumber) - { - ereport(ERROR, (errmsg("Column \"%s\" does not exist on " - "relation \"%s\"", sourceColumnName, - get_rel_name(targetRelationId)))); - } - - Index varNo = 1; - Oid targetTypeId = InvalidOid; - int32 targetTypMod = 0; - Oid targetCollation = InvalidOid; - Index varlevelsup = 0; - - /* this function throws error in case anything goes wrong */ - get_atttypetypmodcoll(targetRelationId, attributeNumberOnTarget, - &targetTypeId, &targetTypMod, &targetCollation); - Var *targetColumn = - makeVar(varNo, attributeNumberOnTarget, targetTypeId, targetTypMod, - targetCollation, varlevelsup); - - return targetColumn; -} - - /* * BuildDistributionKeyFromColumnName builds a simple distribution key consisting * only out of a reference to the column of name columnName. Errors out if the @@ -173,9 +121,18 @@ FindColumnWithNameOnTargetRelation(Oid sourceRelationId, char *sourceColumnName, * corresponds to reference tables. 
*/ Var * -BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnName) +BuildDistributionKeyFromColumnName(Oid relationId, char *columnName, LOCKMODE lockMode) { - char *tableName = RelationGetRelationName(distributedRelation); + Relation relation = try_relation_open(relationId, ExclusiveLock); + + if (relation == NULL) + { + ereport(ERROR, (errmsg("relation does not exist"))); + } + + relation_close(relation, NoLock); + + char *tableName = get_rel_name(relationId); /* short circuit for reference tables */ if (columnName == NULL) @@ -187,8 +144,7 @@ BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam truncate_identifier(columnName, strlen(columnName), true); /* lookup column definition */ - HeapTuple columnTuple = SearchSysCacheAttName(RelationGetRelid(distributedRelation), - columnName); + HeapTuple columnTuple = SearchSysCacheAttName(relationId, columnName); if (!HeapTupleIsValid(columnTuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), @@ -218,15 +174,13 @@ BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnNam /* * ColumnToColumnName returns the human-readable name of a column given a - * relation identifier and the column's internal textual (Var) representation. + * relation identifier and the column's internal (Var) representation. * This function will raise an ERROR if no such column can be found or if the * provided Var refers to a system column. 
*/ char * -ColumnToColumnName(Oid relationId, char *columnNodeString) +ColumnToColumnName(Oid relationId, Node *columnNode) { - Node *columnNode = stringToNode(columnNodeString); - if (columnNode == NULL || !IsA(columnNode, Var)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index 1c005e582..6c78a9389 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -1012,8 +1012,8 @@ CitusRangeVarCallbackForLockTable(const RangeVar *rangeVar, Oid relationId, return; } - /* we only allow tables and views to be locked */ - if (!RegularTable(relationId)) + /* we only allow tables, views and foreign tables to be locked */ + if (!RegularTable(relationId) && !IsForeignTable(relationId)) { ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not a table", rangeVar->relname))); diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 942cabba5..6ce96bd9f 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -13,8 +13,10 @@ #include "catalog/dependency.h" #include "catalog/pg_collation.h" #include "catalog/pg_proc.h" +#include "catalog/pg_ts_config.h" #include "catalog/pg_type.h" #include "fmgr.h" +#include "funcapi.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" #include "parser/parse_type.h" @@ -28,13 +30,17 @@ #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" #include "distributed/metadata/distobject.h" #include "distributed/worker_create_or_replace.h" #include "distributed/worker_protocol.h" -static const char * CreateStmtByObjectAddress(const ObjectAddress *address); +static List * CreateStmtListByObjectAddress(const 
ObjectAddress *address); +static bool CompareStringList(List *list1, List *list2); PG_FUNCTION_INFO_V1(worker_create_or_replace_object); +PG_FUNCTION_INFO_V1(worker_create_or_replace_object_array); +static bool WorkerCreateOrReplaceObject(List *sqlStatements); /* @@ -51,6 +57,37 @@ WrapCreateOrReplace(const char *sql) } +/* + * WrapCreateOrReplaceList takes a list of sql commands and wraps it in a call to citus' + * udf to create or replace the existing object based on its create commands. + */ +char * +WrapCreateOrReplaceList(List *sqls) +{ + StringInfoData textArrayLitteral = { 0 }; + initStringInfo(&textArrayLitteral); + + appendStringInfoString(&textArrayLitteral, "ARRAY["); + const char *sql = NULL; + bool first = true; + foreach_ptr(sql, sqls) + { + if (!first) + { + appendStringInfoString(&textArrayLitteral, ", "); + } + appendStringInfoString(&textArrayLitteral, quote_literal_cstr(sql)); + first = false; + } + appendStringInfoString(&textArrayLitteral, "]::text[]"); + + StringInfoData buf = { 0 }; + initStringInfo(&buf); + appendStringInfo(&buf, CREATE_OR_REPLACE_COMMAND, textArrayLitteral.data); + return buf.data; +} + + /* * worker_create_or_replace_object(statement text) * @@ -73,35 +110,102 @@ Datum worker_create_or_replace_object(PG_FUNCTION_ARGS) { text *sqlStatementText = PG_GETARG_TEXT_P(0); - const char *sqlStatement = text_to_cstring(sqlStatementText); - Node *parseTree = ParseTreeNode(sqlStatement); + char *sqlStatement = text_to_cstring(sqlStatementText); + List *sqlStatements = list_make1(sqlStatement); + PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements)); +} + + +/* + * worker_create_or_replace_object(statements text[]) + * + * function is called, by the coordinator, with a CREATE statement for an object. This + * function implements the CREATE ... IF NOT EXISTS functionality for objects that do not + * have this functionality or where their implementation is not sufficient. 
+ * + * Besides checking if an object of said name exists it tries to compare the object to be + * created with the one in the local catalog. If there is a difference the one in the local + * catalog will be renamed after which the statement can be executed on this worker to + * create the object. If more statements are provided, all are compared in order with the + * statements generated on the worker. This works assuming a) both citus versions are the + * same, b) the objects are exactly the same. + * + * Renaming has two purposes + * - free the identifier for creation + * - non destructive if there is data store that would be destroyed if the object was + * used in a table on this node, eg. types. If the type would be dropped with a cascade + * it would drop any column holding user data for this type. + */ +Datum +worker_create_or_replace_object_array(PG_FUNCTION_ARGS) +{ + List *sqlStatements = NIL; + Datum *textArray = NULL; + int length = 0; + deconstruct_array(PG_GETARG_ARRAYTYPE_P(0), TEXTOID, -1, false, 'i', &textArray, + NULL, &length); + + for (int i = 0; i < length; i++) + { + sqlStatements = lappend(sqlStatements, TextDatumGetCString(textArray[i])); + } + + if (list_length(sqlStatements) < 1) + { + ereport(ERROR, (errmsg("expected atleast 1 statement to be provided"))); + } + + PG_RETURN_BOOL(WorkerCreateOrReplaceObject(sqlStatements)); +} + + +/* + * WorkerCreateOrReplaceObject implements the logic used by both variants of + * worker_create_or_replace_object to either create the object or coming to the conclusion + * the object already exists in the correct state. + * + * Returns true if the object has been created, false if it was already in the exact state + * it was asked for. + */ +static bool +WorkerCreateOrReplaceObject(List *sqlStatements) +{ /* - * since going to the drop statement might require some resolving we will do a check - * if the type actually exists instead of adding the IF EXISTS keyword to the - * statement. 
+ * To check which object we are changing we find the object address from the first + * statement passed into the UDF. Later we will check if all object addresses are the + * same. + * + * Although many of the objects will only have one statement in this call, more + * complex objects might come with a list of statements. We assume they all are on the + * same subject. */ + Node *parseTree = ParseTreeNode(linitial(sqlStatements)); ObjectAddress address = GetObjectAddressFromParseTree(parseTree, true); if (ObjectExists(&address)) { - const char *localSqlStatement = CreateStmtByObjectAddress(&address); + /* + * Object with name from statement is already found locally, check if states are + * identical. If objects differ we will rename the old object (non- destructively) + * as to make room to create the new object according to the spec sent. + */ - if (strcmp(sqlStatement, localSqlStatement) == 0) + /* + * Based on the local catalog we generate the list of commands we would send to + * recreate our version of the object. This we can compare to what the coordinator + * sent us. If they match we don't do anything. + */ + List *localSqlStatements = CreateStmtListByObjectAddress(&address); + if (CompareStringList(sqlStatements, localSqlStatements)) { /* - * TODO string compare is a poor man's comparison, but calling equal on the - * parsetree's returns false because there is extra information list character - * position of some sort - */ - - /* - * parseTree sent by the coordinator is the same as we would create for our - * object, therefore we can omit the create statement locally and not create - * the object as it already exists. + * statements sent by the coordinator are the same as we would create for our + * object, therefore we can omit the statements locally and not create the + * object as it already exists in the correct shape. * * We let the coordinator know we didn't create the object. 
*/ - PG_RETURN_BOOL(false); + return false; } char *newName = GenerateBackupNameForCollision(&address); @@ -113,12 +217,47 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS) NULL, None_Receiver, NULL); } - /* apply create statement locally */ - ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL, - None_Receiver, NULL); + /* apply all statement locally */ + char *sqlStatement = NULL; + foreach_ptr(sqlStatement, sqlStatements) + { + parseTree = ParseTreeNode(sqlStatement); + ProcessUtilityParseTree(parseTree, sqlStatement, PROCESS_UTILITY_QUERY, NULL, + None_Receiver, NULL); + + /* TODO verify all statements are about exactly 1 subject, mostly a sanity check + * to prevent unintentional use of this UDF, needs to come after the local + * execution to be able to actually resolve the ObjectAddress of the newly created + * object */ + } /* type has been created */ - PG_RETURN_BOOL(true); + return true; +} + + +static bool +CompareStringList(List *list1, List *list2) +{ + if (list_length(list1) != list_length(list2)) + { + return false; + } + + ListCell *cell1 = NULL; + ListCell *cell2 = NULL; + forboth(cell1, list1, cell2, list2) + { + const char *str1 = lfirst(cell1); + const char *str2 = lfirst(cell2); + + if (strcmp(str1, str2) != 0) + { + return false; + } + } + + return true; } @@ -130,24 +269,38 @@ worker_create_or_replace_object(PG_FUNCTION_ARGS) * therefore you cannot equal this tree against parsed statement. Instead it can be * deparsed to do a string comparison. 
 */ -static const char * -CreateStmtByObjectAddress(const ObjectAddress *address) +static List * +CreateStmtListByObjectAddress(const ObjectAddress *address) { switch (getObjectClass(address)) { case OCLASS_COLLATION: { - return CreateCollationDDL(address->objectId); + return list_make1(CreateCollationDDL(address->objectId)); } case OCLASS_PROC: { - return GetFunctionDDLCommand(address->objectId, false); + return list_make1(GetFunctionDDLCommand(address->objectId, false)); + } + + case OCLASS_TSCONFIG: + { + /* + * We do support TEXT SEARCH CONFIGURATION, however, we can't recreate the + * object in 1 command. Instead we deparse the complete list of statements + * that would recreate our local version of the object. The caller compares + * this list, statement by statement, against the statements it received, + * so any difference in the object's definition is detected. + */ + + List *stmts = GetCreateTextSearchConfigStatements(address); + return DeparseTreeNodes(stmts); } case OCLASS_TYPE: { - return DeparseTreeNode(CreateTypeStmtByObjectAddress(address)); + return list_make1(DeparseTreeNode(CreateTypeStmtByObjectAddress(address))); } default: @@ -179,6 +332,11 @@ GenerateBackupNameForCollision(const ObjectAddress *address) return GenerateBackupNameForProcCollision(address); } + case OCLASS_TSCONFIG: + { + return GenerateBackupNameForTextSearchConfiguration(address); + } + case OCLASS_TYPE: { return GenerateBackupNameForTypeCollision(address); @@ -256,6 +414,25 @@ CreateRenameTypeStmt(const ObjectAddress *address, char *newName) } +/* + * CreateRenameTextSearchStmt creates a rename statement for a text search configuration + * based on its ObjectAddress. The rename statement will rename the existing object on its + * address to the value provided in newName.
+ */ +static RenameStmt * +CreateRenameTextSearchStmt(const ObjectAddress *address, char *newName) +{ + Assert(address->classId == TSConfigRelationId); + RenameStmt *stmt = makeNode(RenameStmt); + + stmt->renameType = OBJECT_TSCONFIGURATION; + stmt->object = (Node *) get_ts_config_namelist(address->objectId); + stmt->newname = newName; + + return stmt; +} + + /* * CreateRenameTypeStmt creates a rename statement for a type based on its ObjectAddress. * The rename statement will rename the existing object on its address to the value @@ -325,6 +502,11 @@ CreateRenameStatement(const ObjectAddress *address, char *newName) return CreateRenameProcStmt(address, newName); } + case OCLASS_TSCONFIG: + { + return CreateRenameTextSearchStmt(address, newName); + } + case OCLASS_TYPE: { return CreateRenameTypeStmt(address, newName); diff --git a/src/include/columnar/columnar_tableam.h b/src/include/columnar/columnar_tableam.h index 784cf0341..2f06e0972 100644 --- a/src/include/columnar/columnar_tableam.h +++ b/src/include/columnar/columnar_tableam.h @@ -50,7 +50,6 @@ typedef struct ColumnarScanDescData *ColumnarScanDesc; const TableAmRoutine * GetColumnarTableAmRoutine(void); extern void columnar_tableam_init(void); -extern bool CheckCitusVersion(int elevel); extern TableScanDesc columnar_beginscan_extended(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h index bf9a57fd1..b463b89f5 100644 --- a/src/include/distributed/backend_data.h +++ b/src/include/distributed/backend_data.h @@ -29,7 +29,6 @@ typedef struct CitusInitiatedBackend { int initiatorNodeIdentifier; - bool transactionOriginator; } CitusInitiatedBackend; @@ -51,6 +50,7 @@ typedef struct BackendData slock_t mutex; bool cancelledDueToDeadlock; uint64 globalPID; + bool distributedCommandOriginator; CitusInitiatedBackend citusBackend; DistributedTransactionId transactionId; } 
BackendData; @@ -62,11 +62,16 @@ extern void InitializeBackendData(void); extern void LockBackendSharedMemory(LWLockMode lockMode); extern void UnlockBackendSharedMemory(void); extern void UnSetDistributedTransactionId(void); +extern void UnSetGlobalPID(void); extern void AssignDistributedTransactionId(void); extern void MarkCitusInitiatedCoordinatorBackend(void); extern void AssignGlobalPID(void); extern uint64 GetGlobalPID(void); +extern void OverrideBackendDataDistributedCommandOriginator(bool + distributedCommandOriginator); extern uint64 ExtractGlobalPID(char *applicationName); +extern int ExtractNodeIdFromGlobalPID(uint64 globalPID); +extern int ExtractProcessIdFromGlobalPID(uint64 globalPID); extern void GetBackendDataForProc(PGPROC *proc, BackendData *result); extern void CancelTransactionDueToDeadlock(PGPROC *proc); extern bool MyBackendGotCancelledDueToDeadlock(bool clearState); @@ -77,6 +82,11 @@ extern int GetAllActiveClientBackendCount(void); extern void IncrementClientBackendCounter(void); extern void DecrementClientBackendCounter(void); +extern bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, + char *queryString, StringInfo queryResultString, + bool reportResultError); + #define INVALID_CITUS_INTERNAL_BACKEND_GPID 0 +#define GLOBAL_PID_NODE_ID_FOR_NODES_NOT_IN_METADATA 99999999 #endif /* BACKEND_DATA_H */ diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 31601dc2a..1b2b92590 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -151,6 +151,8 @@ extern ObjectAddress AlterCollationSchemaStmtObjectAddress(Node *stmt, extern List * PostprocessAlterCollationSchemaStmt(Node *stmt, const char *queryString); extern char * GenerateBackupNameForCollationCollision(const ObjectAddress *address); extern ObjectAddress DefineCollationStmtObjectAddress(Node *stmt, bool missing_ok); +extern List * PreprocessDefineCollationStmt(Node *stmt, const char *queryString, + 
ProcessUtilityContext processUtilityContext); extern List * PostprocessDefineCollationStmt(Node *stmt, const char *queryString); /* database.c - forward declarations */ @@ -465,6 +467,54 @@ extern Oid GetSequenceOid(Oid relationId, AttrNumber attnum); extern bool ConstrTypeUsesIndex(ConstrType constrType); +/* text_search.c - forward declarations */ +extern List * PostprocessCreateTextSearchConfigurationStmt(Node *node, + const char *queryString); +extern List * GetCreateTextSearchConfigStatements(const ObjectAddress *address); +extern List * CreateTextSearchConfigDDLCommandsIdempotent(const ObjectAddress *address); +extern List * PreprocessDropTextSearchConfigurationStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterTextSearchConfigurationStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessRenameTextSearchConfigurationStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterTextSearchConfigurationSchemaStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PostprocessAlterTextSearchConfigurationSchemaStmt(Node *node, + const char *queryString); +extern List * PreprocessTextSearchConfigurationCommentStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PreprocessAlterTextSearchConfigurationOwnerStmt(Node *node, + const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PostprocessAlterTextSearchConfigurationOwnerStmt(Node *node, + const char *queryString); +extern ObjectAddress CreateTextSearchConfigurationObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress RenameTextSearchConfigurationStmtObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress AlterTextSearchConfigurationStmtObjectAddress(Node 
*node, + bool missing_ok); +extern ObjectAddress AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress TextSearchConfigurationCommentObjectAddress(Node *node, + bool missing_ok); +extern ObjectAddress AlterTextSearchConfigurationOwnerObjectAddress(Node *node, + bool missing_ok); +extern char * GenerateBackupNameForTextSearchConfiguration(const ObjectAddress *address); +extern List * get_ts_config_namelist(Oid tsconfigOid); + /* truncate.c - forward declarations */ extern void PreprocessTruncateStatement(TruncateStmt *truncateStatement); diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index b91fba87e..ebf4a6147 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -31,6 +31,7 @@ extern void AssertObjectTypeIsFunctional(ObjectType type); extern void QualifyTreeNode(Node *stmt); extern char * DeparseTreeNode(Node *stmt); +extern List * DeparseTreeNodes(List *stmts); /* forward declarations for deparse_attribute_stmts.c */ extern char * DeparseRenameAttributeStmt(Node *); @@ -59,6 +60,15 @@ extern char * DeparseAlterTableStmt(Node *node); extern void QualifyAlterTableSchemaStmt(Node *stmt); +/* forward declarations for deparse_text_search.c */ +extern char * DeparseCreateTextSearchStmt(Node *node); +extern char * DeparseDropTextSearchConfigurationStmt(Node *node); +extern char * DeparseRenameTextSearchConfigurationStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationSchemaStmt(Node *node); +extern char * DeparseTextSearchConfigurationCommentStmt(Node *node); +extern char * DeparseAlterTextSearchConfigurationOwnerStmt(Node *node); + /* forward declarations for deparse_schema_stmts.c */ extern char * DeparseCreateSchemaStmt(Node *node); extern char * DeparseDropSchemaStmt(Node *node); @@ -140,6 +150,14 @@ extern char * DeparseAlterExtensionStmt(Node *stmt); /*
 forward declarations for deparse_database_stmts.c */ extern char * DeparseAlterDatabaseOwnerStmt(Node *node); +/* forward declarations for deparse_text_search_stmts.c */ +extern void QualifyDropTextSearchConfigurationStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationStmt(Node *node); +extern void QualifyRenameTextSearchConfigurationStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationSchemaStmt(Node *node); +extern void QualifyTextSearchConfigurationCommentStmt(Node *node); +extern void QualifyAlterTextSearchConfigurationOwnerStmt(Node *node); + /* forward declarations for deparse_sequence_stmts.c */ extern char * DeparseDropSequenceStmt(Node *node); extern char * DeparseRenameSequenceStmt(Node *node); diff --git a/src/include/distributed/distribution_column.h b/src/include/distributed/distribution_column.h index 0bad0fe52..ced1be9a7 100644 --- a/src/include/distributed/distribution_column.h +++ b/src/include/distributed/distribution_column.h @@ -19,11 +19,9 @@ /* Remaining metadata utility functions */ -extern Var * FindColumnWithNameOnTargetRelation(Oid sourceRelationId, - char *sourceColumnName, - Oid targetRelationId); -extern Var * BuildDistributionKeyFromColumnName(Relation distributedRelation, - char *columnName); -extern char * ColumnToColumnName(Oid relationId, char *columnNodeString); +extern Var * BuildDistributionKeyFromColumnName(Oid relationId, + char *columnName, + LOCKMODE lockMode); +extern char * ColumnToColumnName(Oid relationId, Node *columnNode); #endif /* DISTRIBUTION_COLUMN_H */ diff --git a/src/include/distributed/function_call_delegation.h b/src/include/distributed/function_call_delegation.h index 5471a3f1d..bf3dbe170 100644 --- a/src/include/distributed/function_call_delegation.h +++ b/src/include/distributed/function_call_delegation.h @@ -23,7 +23,7 @@ extern bool InTopLevelDelegatedFunctionCall; extern bool InDelegatedProcedureCall; PlannedStmt * TryToDelegateFunctionCall(DistributedPlanningContext
*planContext); -extern void ResetAllowedShardKeyValue(void); +extern void CheckAndResetAllowedShardKeyValueIfNeeded(void); extern bool IsShardKeyValueAllowed(Const *shardKey, uint32 colocationId); #endif /* FUNCTION_CALL_DELEGATION_H */ diff --git a/src/include/distributed/lock_graph.h b/src/include/distributed/lock_graph.h index 46fec1dee..f204ebb03 100644 --- a/src/include/distributed/lock_graph.h +++ b/src/include/distributed/lock_graph.h @@ -31,11 +31,13 @@ */ typedef struct WaitEdge { + uint64 waitingGPid; int waitingPid; int waitingNodeId; int64 waitingTransactionNum; TimestampTz waitingTransactionStamp; + uint64 blockingGPid; int blockingPid; int blockingNodeId; int64 blockingTransactionNum; @@ -58,7 +60,7 @@ typedef struct WaitGraph } WaitGraph; -extern WaitGraph * BuildGlobalWaitGraph(void); +extern WaitGraph * BuildGlobalWaitGraph(bool onlyDistributedTx); extern bool IsProcessWaitingForLock(PGPROC *proc); extern bool IsInDistributedTransaction(BackendData *backendData); extern TimestampTz ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex); diff --git a/src/include/distributed/metadata/dependency.h b/src/include/distributed/metadata/dependency.h index d20103011..141b2a628 100644 --- a/src/include/distributed/metadata/dependency.h +++ b/src/include/distributed/metadata/dependency.h @@ -19,6 +19,7 @@ extern List * GetUniqueDependenciesList(List *objectAddressesList); extern List * GetDependenciesForObject(const ObjectAddress *target); +extern List * GetAllSupportedDependenciesForObject(const ObjectAddress *target); extern List * GetAllDependenciesForObject(const ObjectAddress *target); extern List * OrderObjectAddressListInDependencyOrder(List *objectAddressList); extern bool SupportedDependencyByCitus(const ObjectAddress *address); diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index 472cd83e2..5ea04ec73 100644 --- a/src/include/distributed/metadata/distobject.h +++ 
b/src/include/distributed/metadata/distobject.h @@ -30,8 +30,8 @@ extern bool IsObjectAddressOwnedByExtension(const ObjectAddress *target, ObjectAddress *extensionAddress); extern ObjectAddress PgGetObjectAddress(char *ttype, ArrayType *namearr, ArrayType *argsarr); - extern List * GetDistributedObjectAddressList(void); +extern RoleSpec * GetRoleSpecObjectForUser(Oid roleOid); extern void UpdateDistributedObjectColocationId(uint32 oldColocationId, uint32 newColocationId); #endif /* CITUS_METADATA_DISTOBJECT_H */ diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index d1db0f2fe..3e7a3b6f3 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -238,7 +238,7 @@ extern void DeleteShardRow(uint64 shardId); extern void UpdateShardPlacementState(uint64 placementId, char shardState); extern void UpdatePlacementGroupId(uint64 placementId, int groupId); extern void DeleteShardPlacementRow(uint64 placementId); -extern void CreateDistributedTable(Oid relationId, Var *distributionColumn, +extern void CreateDistributedTable(Oid relationId, char *distributionColumnName, char distributionMethod, int shardCount, bool shardCountIsStrict, char *colocateWithTableName, bool viaDeprecatedAPI); diff --git a/src/include/distributed/transaction_management.h b/src/include/distributed/transaction_management.h index 48bce27a5..5ff34e21a 100644 --- a/src/include/distributed/transaction_management.h +++ b/src/include/distributed/transaction_management.h @@ -70,6 +70,9 @@ typedef struct AllowedDistributionColumn Const *distributionColumnValue; uint32 colocationId; bool isActive; + + /* In nested executor, track the level at which value is set */ + int executorLevel; } AllowedDistributionColumn; /* diff --git a/src/include/distributed/worker_create_or_replace.h b/src/include/distributed/worker_create_or_replace.h index 60323d172..148cee138 100644 --- 
a/src/include/distributed/worker_create_or_replace.h +++ b/src/include/distributed/worker_create_or_replace.h @@ -19,6 +19,7 @@ #define CREATE_OR_REPLACE_COMMAND "SELECT worker_create_or_replace_object(%s);" extern char * WrapCreateOrReplace(const char *sql); +extern char * WrapCreateOrReplaceList(List *sqls); extern char * GenerateBackupNameForCollision(const ObjectAddress *address); extern RenameStmt * CreateRenameStatement(const ObjectAddress *address, char *newName); diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 0a6b637b3..db8adaedb 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -86,6 +86,7 @@ extern List * ActiveReadableNodeList(void); extern WorkerNode * FindWorkerNode(const char *nodeName, int32 nodePort); extern WorkerNode * FindWorkerNodeOrError(const char *nodeName, int32 nodePort); extern WorkerNode * FindWorkerNodeAnyCluster(const char *nodeName, int32 nodePort); +extern WorkerNode * FindNodeWithNodeId(int nodeId); extern List * ReadDistNode(bool includeNodesFromOtherClusters); extern void EnsureCoordinator(void); extern void InsertCoordinatorIfClusterEmpty(void); diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index 25f4388e1..916d62afa 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -256,3 +256,7 @@ s/CREATE TABLESPACE test_tablespace LOCATION.*/CREATE TABLESPACE test_tablespace s/(.*absolute correlation \()([0,1]\.[0-9]+)(\) of var attribute [0-9]+ is smaller than.*)/\1X\.YZ\3/g s/NOTICE: issuing WITH placement_data\(shardid, shardstate, shardlength, groupid, placementid\) AS \(VALUES \([0-9]+, [0-9]+, [0-9]+, [0-9]+, [0-9]+\)\)/NOTICE: issuing WITH placement_data\(shardid, shardstate, shardlength, groupid, placementid\) AS \(VALUES \(xxxxxx, xxxxxx, xxxxxx, xxxxxx, xxxxxx\)\)/g + +# global_pid when pg_cancel_backend is sent to workers 
+s/pg_cancel_backend\('[0-9]+'::bigint\)/pg_cancel_backend('xxxxx'::bigint)/g +s/issuing SELECT pg_cancel_backend\([0-9]+::integer\)/issuing SELECT pg_cancel_backend(xxxxx::integer)/g diff --git a/src/test/regress/expected/aggregate_support.out b/src/test/regress/expected/aggregate_support.out index 89ce53c70..bf35d7f4b 100644 --- a/src/test/regress/expected/aggregate_support.out +++ b/src/test/regress/expected/aggregate_support.out @@ -875,6 +875,7 @@ BEGIN RETURN $1 * $1; END; $function$; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION square_func(int) RETURNS int LANGUAGE plpgsql @@ -883,6 +884,7 @@ BEGIN RETURN $1 * $1; END; $function$; +RESET citus.enable_metadata_sync; SELECT const_function(1), string_agg(a::character, ',') FROM t1; NOTICE: stable_fn called CONTEXT: PL/pgSQL function const_function(integer) line XX at RAISE diff --git a/src/test/regress/expected/citus_local_tables_mx.out b/src/test/regress/expected/citus_local_tables_mx.out index 8ea410829..0052c63af 100644 --- a/src/test/regress/expected/citus_local_tables_mx.out +++ b/src/test/regress/expected/citus_local_tables_mx.out @@ -724,6 +724,58 @@ $$); (localhost,57638,t,0) (2 rows) +-- verify that partitioned citus local tables with dropped columns can be distributed. 
issue: #5577 +CREATE TABLE parent_dropped_col(a int, eventtime date) PARTITION BY RANGE ( eventtime); +SELECT citus_add_local_table_to_metadata('parent_dropped_col'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE parent_dropped_col DROP column a; +CREATE TABLE parent_dropped_col_1 PARTITION OF parent_dropped_col for VALUES FROM ('2000-01-01') TO ('2001-01-01'); +SELECT create_distributed_table('parent_dropped_col', 'eventtime'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- another example to test +CREATE TABLE parent_dropped_col_2( + col_to_drop_0 text, + col_to_drop_1 text, + col_to_drop_2 date, + col_to_drop_3 inet, + col_to_drop_4 date, + measureid integer, + eventdatetime date, + measure_data jsonb, + PRIMARY KEY (measureid, eventdatetime, measure_data)) + PARTITION BY RANGE(eventdatetime); +select citus_add_local_table_to_metadata('parent_dropped_col_2'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE parent_dropped_col_2 DROP COLUMN col_to_drop_1; +CREATE TABLE parent_dropped_col_2_2000 PARTITION OF parent_dropped_col_2 FOR VALUES FROM ('2000-01-01') TO ('2001-01-01'); +SELECT create_distributed_table('parent_dropped_col_2', 'measureid'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- verify that the partitioned tables are distributed with the correct distribution column +SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition + WHERE logicalrelid IN ('parent_dropped_col'::regclass, 'parent_dropped_col_2'::regclass) + ORDER BY logicalrelid; + logicalrelid | partmethod | partkey +--------------------------------------------------------------------- + parent_dropped_col | h | {VAR :varno 1 :varattno 1 :vartype 1082 :vartypmod -1 :varcollid 0 
:varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} + parent_dropped_col_2 | h | {VAR :varno 1 :varattno 5 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 5 :location -1} +(2 rows) + -- cleanup at exit set client_min_messages to error; DROP SCHEMA citus_local_tables_mx CASCADE; diff --git a/src/test/regress/expected/citus_local_tables_queries_mx.out b/src/test/regress/expected/citus_local_tables_queries_mx.out index 684644476..52b73e7df 100644 --- a/src/test/regress/expected/citus_local_tables_queries_mx.out +++ b/src/test/regress/expected/citus_local_tables_queries_mx.out @@ -58,6 +58,7 @@ CREATE TABLE postgres_local_table(a int, b int); -- We shouldn't use LIMIT in INSERT SELECT queries to make the test faster as -- LIMIT would force planner to wrap SELECT query in an intermediate result and -- this might reduce the coverage of the test cases. +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ BEGIN SET client_min_messages to ERROR; @@ -74,6 +75,7 @@ CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ RESET client_min_messages; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; --------------------------------------------------------------------- ---- SELECT ---- --------------------------------------------------------------------- diff --git a/src/test/regress/expected/coordinator_evaluation.out b/src/test/regress/expected/coordinator_evaluation.out index bd309bdb2..f675c316f 100644 --- a/src/test/regress/expected/coordinator_evaluation.out +++ b/src/test/regress/expected/coordinator_evaluation.out @@ -523,6 +523,7 @@ BEGIN RETURN trunc(random() * (end_int-start_int) + start_int); END; $$ LANGUAGE 'plpgsql' STRICT; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure(int) LANGUAGE plpgsql AS $procedure$ @@ -532,6 +533,7 @@ BEGIN PERFORM DISTINCT value FROM coordinator_evaluation_table_2 WHERE key = 
filterKey; END; $procedure$; +RESET citus.enable_metadata_sync; -- we couldn't find a meaningful query to write for this -- however this query fails before https://github.com/citusdata/citus/pull/3454 SET client_min_messages TO DEBUG2; @@ -564,6 +566,7 @@ BEGIN INSERT INTO coordinator_evaluation_table_2 VALUES (filterKey, filterKey); END; $procedure$; +DEBUG: switching to sequential query execution mode RESET citus.log_remote_commands ; RESET client_min_messages; -- these calls would INSERT key = 101, so test if insert succeeded diff --git a/src/test/regress/expected/coordinator_evaluation_modify.out b/src/test/regress/expected/coordinator_evaluation_modify.out index 906dbffef..e75ac60b1 100644 --- a/src/test/regress/expected/coordinator_evaluation_modify.out +++ b/src/test/regress/expected/coordinator_evaluation_modify.out @@ -833,11 +833,13 @@ EXECUTE router_with_only_function; SET citus.log_local_commands TO ON; SET search_path TO coordinator_evaluation_combinations_modify; -- returns 2 on the worker +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_constant_stable() RETURNS INT AS $$ BEGIN RETURN 2; END; $$ language plpgsql STABLE; +RESET citus.enable_metadata_sync; -- all local values INSERT INTO user_info_data (user_id, u_data) VALUES (3, '(''test3'', 3)'), (4, '(''test4'', 4)'), (7, '(''test7'', 7)'), diff --git a/src/test/regress/expected/coordinator_shouldhaveshards.out b/src/test/regress/expected/coordinator_shouldhaveshards.out index 2cd9f2f20..5d5fa982c 100644 --- a/src/test/regress/expected/coordinator_shouldhaveshards.out +++ b/src/test/regress/expected/coordinator_shouldhaveshards.out @@ -898,9 +898,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM (SELECT in -- a helper function which return true if the coordinated -- trannsaction uses 2PC +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', 
$$coordinated_transaction_should_use_2PC$$; +RESET citus.enable_metadata_sync; -- a local SELECT followed by remote SELECTs -- does not trigger 2PC BEGIN; diff --git a/src/test/regress/expected/distributed_collations.out b/src/test/regress/expected/distributed_collations.out index f2413d0e5..bc6a5a859 100644 --- a/src/test/regress/expected/distributed_collations.out +++ b/src/test/regress/expected/distributed_collations.out @@ -163,3 +163,19 @@ SELECT run_command_on_workers($$DROP USER collationuser;$$); (localhost,57638,t,"DROP ROLE") (2 rows) +\c - - - :worker_1_port +-- test creating a collation on a worker +CREATE COLLATION another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +-- test if creating a collation on a worker on a local +-- schema raises the right error +SET citus.enable_ddl_propagation TO off; +CREATE SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; +CREATE COLLATION collation_creation_on_worker.another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. 
+SET citus.enable_ddl_propagation TO off; +DROP SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; diff --git a/src/test/regress/expected/distributed_functions_conflict.out b/src/test/regress/expected/distributed_functions_conflict.out index b961fb955..354aea9c0 100644 --- a/src/test/regress/expected/distributed_functions_conflict.out +++ b/src/test/regress/expected/distributed_functions_conflict.out @@ -3,6 +3,7 @@ CREATE SCHEMA proc_conflict; \c - - - :worker_1_port SET search_path TO proc_conflict; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 2 + i; @@ -12,6 +13,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ @@ -62,6 +64,7 @@ DROP AGGREGATE existing_agg(int) CASCADE; DROP FUNCTION existing_func(int, int) CASCADE; \c - - - :worker_1_port SET search_path TO proc_conflict; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 3 + i; @@ -71,6 +74,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ diff --git a/src/test/regress/expected/fast_path_router_modify.out b/src/test/regress/expected/fast_path_router_modify.out index f99aac5d4..bf7e07362 100644 --- a/src/test/regress/expected/fast_path_router_modify.out +++ b/src/test/regress/expected/fast_path_router_modify.out @@ -228,7 +228,10 @@ BEGIN DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT modify_fast_path_plpsql(1,1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -241,6 +244,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(2,2); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -253,6 +257,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(3,3); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -265,6 +270,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(4,4); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -277,6 +283,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(5,5); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path 
WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -289,6 +296,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(6,6); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query CONTEXT: SQL statement "DELETE FROM modify_fast_path WHERE key = $1 AND value_1 = $2" PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statement @@ -301,6 +309,7 @@ PL/pgSQL function modify_fast_path_plpsql(integer,integer) line XX at SQL statem (1 row) SELECT modify_fast_path_plpsql(6,6); +DEBUG: function does not have co-located tables modify_fast_path_plpsql --------------------------------------------------------------------- diff --git a/src/test/regress/expected/follower_single_node.out b/src/test/regress/expected/follower_single_node.out index 7fbc40280..de2f88b6e 100644 --- a/src/test/regress/expected/follower_single_node.out +++ b/src/test/regress/expected/follower_single_node.out @@ -239,7 +239,7 @@ ERROR: node group 0 does not have a secondary node -- should work this time \c -reuse-previous=off regression - - :master_port SET search_path TO single_node; -SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary'); +SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary', nodecluster => 'second-cluster'); ?column? 
--------------------------------------------------------------------- 1 @@ -251,7 +251,7 @@ SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhavesha 1 (1 row) -\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" +\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" SET search_path TO single_node; SELECT * FROM test WHERE x = 1; x | y @@ -387,6 +387,40 @@ SELECT * FROM columnar_test ORDER BY 1,2; 1 | 8 (6 rows) +\c -reuse-previous=off regression - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path TO single_node; +CREATE TABLE dist_table (a INT, b INT); +SELECT create_distributed_table ('dist_table', 'a', shard_count:=4); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_table VALUES (1, 1); +\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" +SET search_path TO single_node; +SELECT * FROM dist_table; + a | b +--------------------------------------------------------------------- + 1 | 1 +(1 row) + +SELECT global_pid AS follower_coordinator_gpid FROM get_all_active_transactions() WHERE process_id = pg_backend_pid() \gset +SELECT pg_typeof(:follower_coordinator_gpid); + pg_typeof +--------------------------------------------------------------------- + bigint +(1 row) + +SELECT pg_cancel_backend(:follower_coordinator_gpid); +ERROR: canceling statement due to user request +SET citus.log_remote_commands TO ON; +SELECT pg_cancel_backend(:follower_coordinator_gpid) FROM dist_table WHERE a = 1; +NOTICE: executing the command locally: SELECT pg_cancel_backend('xxxxx'::bigint) AS pg_cancel_backend FROM single_node.dist_table_102008 dist_table WHERE (a OPERATOR(pg_catalog.=) 1) +NOTICE: issuing SELECT pg_cancel_backend(xxxxx::integer) +DETAIL: on server postgres@localhost:xxxxx connectionId: 
xxxxxxx +ERROR: canceling statement due to user request -- Cleanup \c -reuse-previous=off regression - - :master_port SET search_path TO single_node; diff --git a/src/test/regress/expected/forcedelegation_functions.out b/src/test/regress/expected/forcedelegation_functions.out index c4e07d5f9..c26f7b75b 100644 --- a/src/test/regress/expected/forcedelegation_functions.out +++ b/src/test/regress/expected/forcedelegation_functions.out @@ -293,6 +293,8 @@ BEGIN RETURN ret_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands CREATE OR REPLACE FUNCTION func_calls_forcepush_func() RETURNS NUMERIC AS $$ DECLARE incremented_val NUMERIC; @@ -302,6 +304,8 @@ BEGIN RETURN incremented_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('func_calls_forcepush_func()'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -354,12 +358,16 @@ PL/pgSQL function func_calls_forcepush_func() line XX at SQL statement 101 (1 row) +-- Block distributing that function as distributing it causes +-- different test output on PG 14. 
+SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_val() RETURNS INT AS $$ BEGIN RETURN 100::INT; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- -- UDF calling another UDF in a FROM clause -- fn() @@ -377,7 +385,10 @@ BEGIN RETURN incremented_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT func_calls_forcepush_func_infrom(); +DEBUG: function does not have co-located tables DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)" PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement @@ -395,6 +406,7 @@ PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement BEGIN; SELECT func_calls_forcepush_func_infrom(); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT inner_force_delegation_function FROM inner_force_delegation_function(add_val + 100)" PL/pgSQL function func_calls_forcepush_func_infrom() line XX at SQL statement @@ -428,7 +440,10 @@ BEGIN RETURN incremented_val; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT func_calls_forcepush_func_intarget(); +DEBUG: function does not have co-located tables DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0" PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement @@ -446,6 +461,7 @@ PL/pgSQL function func_calls_forcepush_func_intarget() line XX at SQL statement BEGIN; SELECT func_calls_forcepush_func_intarget(); +DEBUG: not pushing down function calls in a multi-statement transaction NOTICE: inner_force_delegation_function():201 DETAIL: from localhost:xxxxx CONTEXT: SQL statement "SELECT inner_force_delegation_function(100 + 100) OFFSET 0" @@ -473,6 +489,8 @@ BEGIN END if; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('test_recursive(int)', '$1', colocate_with := 'test_nested', force_delegation := true); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -544,6 +562,8 @@ BEGIN RETURN x + y; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'test_non_constant(int,bigint)', '$1', @@ -610,6 +630,8 @@ BEGIN INSERT INTO emp VALUES (empname, 33); END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands CREATE OR REPLACE FUNCTION outer_emp() RETURNS void AS $$ @@ -618,6 +640,8 @@ BEGIN PERFORM inner_emp('hello'); END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('inner_emp(text)','empname', force_delegation := true); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -627,6 +651,7 @@ DETAIL: A command for a distributed function is run. To make sure subsequent co (1 row) SELECT outer_emp(); +DEBUG: function does not have co-located tables DEBUG: Skipping pushdown of function from a PL/PgSQL simple expression CONTEXT: SQL statement "SELECT inner_emp('hello')" PL/pgSQL function outer_emp() line XX at PERFORM @@ -650,6 +675,8 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown SELECT(a+1); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data(int)', 'a', colocate_with := 'test_forcepushdown', @@ -725,6 +752,8 @@ BEGIN SELECT intcol FROM forcepushdown_schema.test_forcepushdown_noncolocate; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data_nonlocal(int)', 'a', colocate_with := 'test_forcepushdown', @@ -803,6 +832,8 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown_char VALUES (a); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_char(char)', 'a', colocate_with := 'test_forcepushdown_char', @@ -821,6 +852,8 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown_varchar VALUES (a); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_varchar(varchar)', 'a', colocate_with := 'test_forcepushdown_varchar', @@ -839,6 +872,8 @@ BEGIN INSERT INTO forcepushdown_schema.test_forcepushdown_text VALUES (a); END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_text(text)', 'a', colocate_with := 'test_forcepushdown_text', @@ -947,6 +982,8 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'select_data(int)', 'a', colocate_with := 'test_subquery', @@ -969,6 +1006,8 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'select_data_noncolocate(int)', 'a', colocate_with := 'test_subquery', @@ -990,6 +1029,8 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data_cte1(int)', 'a', colocate_with := 'test_subquery', @@ -1011,6 +1052,8 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_select_data_cte2(int)', 'a', colocate_with := 'test_subquery', @@ -1033,6 +1076,8 @@ BEGIN RAISE NOTICE 'Result: %', var; END; $fn$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function( 'insert_data_cte_nondist(int)', 'a', colocate_with := 'test_subquery', @@ -1203,6 +1248,8 @@ BEGIN RETURN x + y; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('test_prepare(int,int)','x',force_delegation :=true, colocate_with := 'table_test_prepare'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -1223,9 +1270,12 @@ BEGIN PERFORM 1, 1 + a FROM test_prepare(x + 1, y + 1) a; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- First 5 get delegated and succeeds BEGIN; SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1244,6 +1294,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1262,6 +1313,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1280,6 +1332,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1298,6 +1351,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT 
FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1324,6 +1378,7 @@ SELECT COUNT(*) FROM table_test_prepare; -- 6th execution will be generic plan and should get delegated SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1342,6 +1397,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM (1 row) SELECT outer_test_prepare(1,1); +DEBUG: not pushing down function calls in a multi-statement transaction DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1362,6 +1418,7 @@ PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM END; -- Fails as expected SELECT outer_test_prepare(1,2); +DEBUG: function does not have co-located tables DEBUG: pushing down function call in a multi-statement transaction CONTEXT: SQL statement "SELECT FROM test_prepare(x, y)" PL/pgSQL function outer_test_prepare(integer,integer) line XX at PERFORM @@ -1397,6 +1454,8 @@ BEGIN RETURN x; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('test(int)', 'x', colocate_with := 'test_perform', force_delegation := true); DEBUG: switching to sequential query execution mode @@ -1418,10 +1477,471 @@ NOTICE: INPUT 3 CONTEXT: PL/pgSQL function test(integer) line XX at RAISE SQL statement "SELECT test(3)" PL/pgSQL function inline_code_block line XX at PERFORM +CREATE TABLE testnested_table (x int, y int); +SELECT create_distributed_table('testnested_table','x'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +-- Non-force function calling force-delegation function +CREATE OR REPLACE FUNCTION outer_local_fn() +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM inner_fn(1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (2,3); + PERFORM 1 FROM inner_fn(4); + INSERT INTO forcepushdown_schema.testnested_table VALUES (5,6); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function('inner_fn(int)','x', + colocate_with:='testnested_table', force_delegation := true); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT outer_local_fn(); +DEBUG: function does not have co-located tables +DEBUG: pushing down function call in a multi-statement transaction +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)" +PL/pgSQL function outer_local_fn() line XX at PERFORM +DEBUG: pushing down the function call +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(1)" +PL/pgSQL function outer_local_fn() line XX at PERFORM +DEBUG: pushing down function call in a multi-statement transaction +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)" +PL/pgSQL function outer_local_fn() line XX at PERFORM +DEBUG: pushing down the function call +CONTEXT: SQL statement "SELECT 1 FROM inner_fn(4)" +PL/pgSQL function outer_local_fn() line XX at PERFORM + outer_local_fn +--------------------------------------------------------------------- + +(1 row) + +-- Rows from 1-6 should appear +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- + 1 | 1 + 2 | 3 + 4 | 4 + 5 | 6 +(4 rows) + +BEGIN; +SELECT outer_local_fn(); +DEBUG: not pushing down function calls in a multi-statement transaction + outer_local_fn +--------------------------------------------------------------------- + +(1 row) + +END; +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- + 1 | 1 + 1 | 1 + 2 | 3 + 2 | 3 + 4 | 4 + 4 | 4 + 5 | 6 + 5 | 6 +(8 rows) + +DROP FUNCTION inner_fn(int); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +DROP FUNCTION outer_local_fn(); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +TRUNCATE TABLE testnested_table; +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +-- Force-delegation function calling non-force function +CREATE OR REPLACE FUNCTION outer_fn(y int, z int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM forcepushdown_schema.inner_fn(y); + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM 1 FROM forcepushdown_schema.inner_fn(z); + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function('inner_fn(int)','x', + colocate_with:='testnested_table', force_delegation := false); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function('outer_fn(int, int)','y', + colocate_with:='testnested_table', force_delegation := true); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT outer_fn(1, 2); +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)" +PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement +SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)" +PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM +while executing command on localhost:xxxxx +BEGIN; +SELECT outer_fn(1, 2); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x)" +PL/pgSQL function forcepushdown_schema.inner_fn(integer) line XX at SQL statement +SQL statement "SELECT 1 FROM forcepushdown_schema.inner_fn(z)" 
+PL/pgSQL function forcepushdown_schema.outer_fn(integer,integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +-- No rows +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 rows) + +-- Force-delegation function calling force-delegation function +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_inner(x+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +-- Keys 7,8,9,14 fall on one node and 15 on a different node +-- Function gets delegated to node with shard-key = 7 and inner function +-- will not be delegated but inserts shard-key = 8 locally +SELECT force_push_outer(7); +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM +while executing command on localhost:xxxxx +BEGIN; +-- Function gets delegated to node with shard-key = 8 and inner function +-- will not be delegated but inserts shard-key = 9 locally +SELECT force_push_outer(8); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM +while executing 
command on localhost:xxxxx +END; +BEGIN; +-- Function gets delegated to node with shard-key = 14 and inner function +-- will not be delegated but fails to insert shard-key = 15 remotely +SELECT force_push_outer(14); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_inner(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_inner(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 rows) + +-- +-- Function-1() --> function-2() --> function-3() +-- +CREATE OR REPLACE FUNCTION force_push_1(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_2(x+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_2(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM forcepushdown_schema.force_push_3(y+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_3(z int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function( + 'force_push_1(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_2(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_3(int)', 'z', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +TRUNCATE TABLE testnested_table; +BEGIN; +-- All local inserts +SELECT force_push_1(7); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +BEGIN; +-- Local(shard-keys 13, 15) + remote insert (shard-key 14) +SELECT force_push_1(13); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y)" +PL/pgSQL function forcepushdown_schema.force_push_2(integer) line XX at SQL statement +SQL statement "SELECT forcepushdown_schema.force_push_2(x+1) LIMIT 1" +PL/pgSQL function forcepushdown_schema.force_push_1(integer) line XX at PERFORM +while executing command on localhost:xxxxx +END; +SELECT * FROM testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 
rows) + +TRUNCATE TABLE testnested_table; +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +SELECT create_distributed_function( + 'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands + create_distributed_function +--------------------------------------------------------------------- + +(1 row) + +BEGIN; +SELECT force_push_outer(7); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +ERROR: queries must filter by the distribution argument in the same colocation group when using the forced function pushdown +HINT: consider disabling forced delegation through create_distributed_table(..., force_delegation := false) +CONTEXT: SQL statement "INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1)" +PL/pgSQL function forcepushdown_schema.force_push_outer(integer) line XX at SQL statement +while executing command on localhost:xxxxx +END; +TABLE testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- +(0 rows) + +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + RAISE NOTICE '%', y; +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x+1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands +BEGIN; +SELECT force_push_outer(9); +DEBUG: pushing down function call in a multi-statement transaction +DEBUG: pushing down the function call +NOTICE: 10 +DETAIL: from localhost:xxxxx + force_push_outer +--------------------------------------------------------------------- + +(1 row) + +END; +TABLE testnested_table ORDER BY 1; + x | y +--------------------------------------------------------------------- + 9 | 9 +(1 row) + RESET client_min_messages; SET citus.log_remote_commands TO off; DROP SCHEMA forcepushdown_schema CASCADE; -NOTICE: drop cascades to 38 other objects +NOTICE: drop cascades to 46 other objects DETAIL: drop cascades to table test_forcepushdown drop cascades to table test_forcepushdown_noncolocate drop cascades to function insert_data(integer) @@ -1460,3 +1980,11 @@ drop cascades to function test_prepare(integer,integer) drop cascades to function outer_test_prepare(integer,integer) drop cascades to table test_perform drop cascades to function test(integer) +drop cascades to table testnested_table +drop cascades to function inner_fn(integer) +drop cascades to function outer_fn(integer,integer) +drop cascades to function force_push_inner(integer) +drop cascades to function force_push_outer(integer) +drop cascades to function force_push_1(integer) +drop cascades to function force_push_2(integer) +drop cascades to function force_push_3(integer) diff --git a/src/test/regress/expected/function_propagation.out b/src/test/regress/expected/function_propagation.out new file mode 100644 index 000000000..a40f1f9a2 --- /dev/null +++ b/src/test/regress/expected/function_propagation.out @@ -0,0 +1,321 @@ +CREATE SCHEMA function_propagation_schema; +SET search_path TO 'function_propagation_schema'; +-- Check whether supported dependencies can be distributed while propagating functions +-- Check types +SET citus.enable_metadata_sync 
TO OFF; + CREATE TYPE function_prop_type AS (a int, b int); +RESET citus.enable_metadata_sync; +CREATE OR REPLACE FUNCTION func_1(param_1 function_prop_type) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +-- Check all dependent objects and function depends on all nodes +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (schema,{function_propagation_schema},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (type,{function_propagation_schema.function_prop_type},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (schema,{function_propagation_schema},{}) + localhost | 57638 | t | (schema,{function_propagation_schema},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;$$) 
ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type},{}) + localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_1}",{function_propagation_schema.function_prop_type}) +(2 rows) + +SET citus.enable_metadata_sync TO OFF; + CREATE TYPE function_prop_type_2 AS (a int, b int); +RESET citus.enable_metadata_sync; +CREATE OR REPLACE FUNCTION func_2(param_1 int) +RETURNS function_prop_type_2 +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (type,{function_propagation_schema.function_prop_type_2},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_2}",{integer}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 
'function_propagation_schema.function_prop_type_2'::regtype::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (type,{function_propagation_schema.function_prop_type_2},{}) + localhost | 57638 | t | (type,{function_propagation_schema.function_prop_type_2},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_2}",{integer}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_2}",{integer}) +(2 rows) + +-- Have a separate check for type created in transaction +BEGIN; + CREATE TYPE function_prop_type_3 AS (a int, b int); +COMMIT; +-- Objects in the body part is not found as dependency +CREATE OR REPLACE FUNCTION func_3(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +DECLARE + internal_param1 function_prop_type_3; +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_3'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_3}",{integer}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 
'function_propagation_schema.func_3'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_3}",{integer}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_3}",{integer}) +(2 rows) + +-- Check table +CREATE TABLE function_prop_table(a int, b int); +-- Non-distributed table is not distributed as dependency +CREATE OR REPLACE FUNCTION func_4(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute function "func_4" having dependency on non-distributed relation "function_prop_table" +DETAIL: Function will be created only locally +HINT: To distribute function, distribute dependent relations first. Then, re-create the function +CREATE OR REPLACE FUNCTION func_5(param_1 int) +RETURNS function_prop_table +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute function "func_5" having dependency on non-distributed relation "function_prop_table" +DETAIL: Function will be created only locally +HINT: To distribute function, distribute dependent relations first. 
Then, re-create the function +-- Functions can be created with distributed table dependency +SELECT create_distributed_table('function_prop_table', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE OR REPLACE FUNCTION func_6(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_6}",{function_propagation_schema.function_prop_table}) +(2 rows) + +-- Views are not supported +CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; +CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute functions having dependency on unsupported object of type "view" +DETAIL: Function will be created only locally +CREATE OR REPLACE FUNCTION func_8(param_1 int) +RETURNS function_prop_view +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +WARNING: Citus can't distribute functions having dependency on unsupported object of type "view" +DETAIL: Function will be created only 
locally +-- Check within transaction +BEGIN; + CREATE TYPE type_in_transaction AS (a int, b int); + CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) + RETURNS int + LANGUAGE plpgsql AS + $$ + BEGIN + return 1; + END; + $$; + -- Within transaction functions are not distributed + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +COMMIT; +-- Show that recreating it outside transaction distributes the function and dependencies +CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (type,{function_propagation_schema.type_in_transaction},{}) +(1 row) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where 
objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (type,{function_propagation_schema.type_in_transaction},{}) + localhost | 57638 | t | (type,{function_propagation_schema.type_in_transaction},{}) +(2 rows) + +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction}) + localhost | 57638 | t | (function,"{function_propagation_schema,func_in_transaction}",{function_propagation_schema.type_in_transaction}) +(2 rows) + +-- Test for SQL function with unsupported object in function body +CREATE TABLE table_in_sql_body(id int); +CREATE FUNCTION max_of_table() +RETURNS int +LANGUAGE SQL AS +$$ + SELECT max(id) FROM table_in_sql_body +$$; +-- Show that only function has propagated, since the table is not resolved as dependency +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regclass::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- +(0 rows) + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + (function,"{function_propagation_schema,max_of_table}",{}) +(1 row) + +SELECT * FROM run_command_on_workers($$SELECT 
pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (function,"{function_propagation_schema,max_of_table}",{}) + localhost | 57638 | t | (function,"{function_propagation_schema,max_of_table}",{}) +(2 rows) + +-- Check extension owned table +CREATE TABLE extension_owned_table(a int); +SELECT run_command_on_workers($$ +CREATE TABLE function_propagation_schema.extension_owned_table(a int); +$$ +); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"CREATE TABLE") + (localhost,57638,t,"CREATE TABLE") +(2 rows) + +CREATE EXTENSION seg; +ALTER EXTENSION seg ADD TABLE extension_owned_table; +NOTICE: Citus does not propagate adding/dropping member objects +HINT: You can add/drop the member objects on the workers as well. 
+SELECT run_command_on_workers($$ +ALTER EXTENSION seg ADD TABLE function_propagation_schema.extension_owned_table; +$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,t,"ALTER EXTENSION") + (localhost,57638,t,"ALTER EXTENSION") +(2 rows) + +CREATE OR REPLACE FUNCTION func_for_ext_check(param_1 extension_owned_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; +RESET search_path; +SET client_min_messages TO WARNING; +DROP SCHEMA function_propagation_schema CASCADE; diff --git a/src/test/regress/expected/global_cancel.out b/src/test/regress/expected/global_cancel.out new file mode 100644 index 000000000..5ebc4098d --- /dev/null +++ b/src/test/regress/expected/global_cancel.out @@ -0,0 +1,83 @@ +CREATE SCHEMA global_cancel; +SET search_path TO global_cancel; +SET citus.next_shard_id TO 56789000; +CREATE TABLE dist_table (a INT, b INT); +SELECT create_distributed_table ('dist_table', 'a', shard_count:=4); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_table VALUES (1, 1); +SELECT global_pid AS coordinator_gpid FROM get_all_active_transactions() WHERE process_id = pg_backend_pid() \gset +SELECT pg_typeof(:coordinator_gpid); + pg_typeof +--------------------------------------------------------------------- + bigint +(1 row) + +SELECT pg_cancel_backend(:coordinator_gpid); +ERROR: canceling statement due to user request +SET citus.log_remote_commands TO ON; +SELECT pg_cancel_backend(:coordinator_gpid) FROM dist_table WHERE a = 1; +NOTICE: issuing SELECT pg_cancel_backend('xxxxx'::bigint) AS pg_cancel_backend FROM global_cancel.dist_table_56789000 dist_table WHERE (a OPERATOR(pg_catalog.=) 1) +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +ERROR: canceling statement due to user request +BEGIN; +SELECT pg_cancel_backend(:coordinator_gpid) FROM dist_table WHERE a = 1; +NOTICE: issuing 
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SELECT pg_cancel_backend('xxxxx'::bigint) AS pg_cancel_backend FROM global_cancel.dist_table_56789000 dist_table WHERE (a OPERATOR(pg_catalog.=) 1) +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +ERROR: canceling statement due to user request +END; +SET citus.log_remote_commands TO OFF; +SELECT global_pid AS maintenance_daemon_gpid +FROM pg_stat_activity psa JOIN get_all_active_transactions() gaat ON psa.pid = gaat.process_id +WHERE application_name = 'Citus Maintenance Daemon' \gset +SET client_min_messages TO ERROR; +CREATE USER global_cancel_user; +SELECT 1 FROM run_command_on_workers('CREATE USER global_cancel_user'); + ?column? +--------------------------------------------------------------------- + 1 + 1 +(2 rows) + +RESET client_min_messages; +SET ROLE global_cancel_user; +SELECT pg_typeof(:maintenance_daemon_gpid); + pg_typeof +--------------------------------------------------------------------- + bigint +(1 row) + +SELECT pg_cancel_backend(:maintenance_daemon_gpid); +ERROR: must be a superuser to cancel superuser query +CONTEXT: while executing command on localhost:xxxxx +SELECT pg_terminate_backend(:maintenance_daemon_gpid); +ERROR: must be a superuser to terminate superuser process +CONTEXT: while executing command on localhost:xxxxx +RESET ROLE; +SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodeport = :master_port \gset +SET client_min_messages TO DEBUG; +-- 10000000000 is the node id multiplier for global pid +SELECT pg_cancel_backend(10000000000 * :coordinator_node_id + 0); +DEBUG: PID 0 is not a PostgreSQL server process +DETAIL: from localhost:xxxxx + pg_cancel_backend +--------------------------------------------------------------------- + f +(1 row) + +SELECT pg_terminate_backend(10000000000 * :coordinator_node_id + 0); 
+DEBUG: PID 0 is not a PostgreSQL server process +DETAIL: from localhost:xxxxx + pg_terminate_backend +--------------------------------------------------------------------- + f +(1 row) + +RESET client_min_messages; +DROP SCHEMA global_cancel CASCADE; +NOTICE: drop cascades to table dist_table diff --git a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out index 27e13263d..cae2222ed 100644 --- a/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out +++ b/src/test/regress/expected/isolation_get_distributed_wait_queries_mx.out @@ -2,13 +2,13 @@ Parsed test spec with 4 sessions starting permutation: s1-begin s1-update-ref-table-from-coordinator s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit s2-commit-worker s2-stop-connection step s1-begin: - BEGIN; + BEGIN; step s1-update-ref-table-from-coordinator: - UPDATE ref_table SET value_1 = 15; + UPDATE ref_table SET value_1 = 15; step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -16,7 +16,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -24,20 +24,20 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-ref-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET 
value_1 = 12 WHERE user_id = 1'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- UPDATE ref_table SET value_1 = 12 WHERE user_id = 1| - UPDATE ref_table SET value_1 = 15; + UPDATE ref_table SET value_1 = 15; |localhost |coordinator_host | 57638| 57636 (1 row) step s1-commit: - COMMIT; + COMMIT; step s2-update-ref-table: <... 
completed> run_commands_on_session_level_connection_to_node @@ -54,7 +54,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -69,7 +69,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -77,7 +77,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -85,7 +85,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-ref-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -93,7 +93,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT 
start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -101,7 +101,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -109,10 +109,10 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-ref-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -142,7 +142,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -150,7 +150,7 @@ stop_session_level_connection_to_node (1 row) 
step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -165,7 +165,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s2-start-session-level-connection s2-begin-on-worker s2-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -173,7 +173,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -181,7 +181,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-dist-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -189,7 +189,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -197,7 +197,7 @@ 
start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -205,10 +205,10 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-dist-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -238,7 +238,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -246,7 +246,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ 
-261,7 +261,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-delete-from-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -269,7 +269,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -277,7 +277,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-delete-from-ref-table: - SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('DELETE FROM ref_table WHERE user_id = 1'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -285,7 +285,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -293,7 +293,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); 
run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -301,10 +301,10 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-ref-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -334,7 +334,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -342,7 +342,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -357,7 +357,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table 
s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -365,7 +365,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -373,7 +373,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-ref-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -381,7 +381,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -389,7 +389,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -397,10 +397,10 @@ 
run_commands_on_session_level_connection_to_node (1 row) step s2-update-ref-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -430,7 +430,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -438,7 +438,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -453,7 +453,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-insert-into-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker 
s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -461,7 +461,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -469,7 +469,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-insert-into-ref-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -477,7 +477,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -485,7 +485,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -493,7 +493,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-ref-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO 
ref_table VALUES(8,81),(9,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -501,7 +501,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -524,7 +524,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -532,7 +532,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -547,7 +547,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker 
s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -555,7 +555,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -563,7 +563,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-to-ref-table: - SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -571,7 +571,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -579,7 +579,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -587,10 +587,10 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-update-ref-table: - 
SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -620,7 +620,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -628,7 +628,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -643,7 +643,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT 
start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -651,7 +651,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -659,7 +659,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-to-ref-table: - SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -667,7 +667,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -675,7 +675,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -683,7 +683,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-ref-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); + 
SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -691,7 +691,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -714,7 +714,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -722,7 +722,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -737,7 +737,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-copy-to-ref-table s2-start-session-level-connection s2-begin-on-worker s2-copy-to-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection 
s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -745,7 +745,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -753,7 +753,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-copy-to-ref-table: - SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -761,7 +761,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -769,7 +769,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -777,7 +777,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-copy-to-ref-table: - SELECT 
run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); + SELECT run_commands_on_session_level_connection_to_node('COPY ref_table FROM PROGRAM ''echo 10, 101 && echo 11, 111'' WITH CSV'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -785,7 +785,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement|current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -808,7 +808,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -816,7 +816,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -831,7 +831,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-select-for-update 
s2-start-session-level-connection s2-begin-on-worker s2-update-ref-table s3-select-distributed-waiting-queries s1-commit-worker s2-commit-worker s1-stop-connection s2-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -839,7 +839,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -847,7 +847,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-select-for-update: - SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE'); + SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -855,7 +855,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -863,7 +863,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -871,10 +871,10 @@ 
run_commands_on_session_level_connection_to_node (1 row) step s2-update-ref-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -904,7 +904,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -912,7 +912,7 @@ stop_session_level_connection_to_node (1 row) step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -927,7 +927,7 @@ restore_isolation_tester_func starting permutation: s2-start-session-level-connection s2-begin-on-worker s2-insert-into-ref-table s1-begin s1-alter-table s3-select-distributed-waiting-queries s2-commit-worker s1-commit s2-stop-connection step s2-start-session-level-connection: - SELECT 
start_session_level_connection_to_node('localhost', 57638); + SELECT start_session_level_connection_to_node('localhost', 57638); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -935,7 +935,7 @@ start_session_level_connection_to_node (1 row) step s2-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -943,7 +943,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s2-insert-into-ref-table: - SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); + SELECT run_commands_on_session_level_connection_to_node('INSERT INTO ref_table VALUES(8,81),(9,91)'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -951,18 +951,18 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-begin: - BEGIN; + BEGIN; step s1-alter-table: - ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); + ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port 
--------------------------------------------------------------------- - ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); + ALTER TABLE ref_table ADD CONSTRAINT rf_p_key PRIMARY KEY(user_id); |INSERT INTO ref_table VALUES(8,81),(9,91)|coordinator_host |localhost | 57636| 57638 (1 row) @@ -976,10 +976,10 @@ run_commands_on_session_level_connection_to_node step s1-alter-table: <... completed> step s1-commit: - COMMIT; + COMMIT; step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -994,28 +994,28 @@ restore_isolation_tester_func starting permutation: s1-begin s1-update-on-the-coordinator s2-update-on-the-coordinator s3-select-distributed-waiting-queries s1-commit step s1-begin: - BEGIN; + BEGIN; step s1-update-on-the-coordinator: - UPDATE tt1 SET value_1 = 4; + UPDATE tt1 SET value_1 = 4; step s2-update-on-the-coordinator: - UPDATE tt1 SET value_1 = 4; + UPDATE tt1 SET value_1 = 4; step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- - UPDATE tt1 SET value_1 = 4; + UPDATE tt1 SET value_1 = 4; | - UPDATE tt1 SET value_1 = 4; + UPDATE tt1 SET value_1 = 4; 
|coordinator_host |coordinator_host | 57636| 57636 (1 row) step s1-commit: - COMMIT; + COMMIT; step s2-update-on-the-coordinator: <... completed> restore_isolation_tester_func @@ -1026,7 +1026,7 @@ restore_isolation_tester_func starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table s4-start-session-level-connection s4-begin-on-worker s4-update-dist-table s3-select-distributed-waiting-queries s1-commit-worker s4-commit-worker s1-stop-connection s4-stop-connection step s1-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1034,7 +1034,7 @@ start_session_level_connection_to_node (1 row) step s1-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1042,7 +1042,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-update-dist-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1050,7 +1050,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s4-start-session-level-connection: - SELECT start_session_level_connection_to_node('localhost', 57637); + SELECT start_session_level_connection_to_node('localhost', 57637); start_session_level_connection_to_node --------------------------------------------------------------------- @@ -1058,7 +1058,7 @@ start_session_level_connection_to_node (1 row) step 
s4-begin-on-worker: - SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); run_commands_on_session_level_connection_to_node --------------------------------------------------------------------- @@ -1066,10 +1066,10 @@ run_commands_on_session_level_connection_to_node (1 row) step s4-update-dist-table: - SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); step s3-select-distributed-waiting-queries: - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port --------------------------------------------------------------------- @@ -1099,7 +1099,7 @@ run_commands_on_session_level_connection_to_node (1 row) step s1-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -1107,7 +1107,138 @@ stop_session_level_connection_to_node (1 row) step s4-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func 
+--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-start-session-level-connection s1-begin-on-worker s1-update-dist-table-id-1 s2-start-session-level-connection s2-update-dist-table-id-1 s3-select-distributed-waiting-queries s1-commit-worker s1-stop-connection s2-stop-connection +step s1-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57637); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-begin-on-worker: + SELECT run_commands_on_session_level_connection_to_node('BEGIN'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-update-dist-table-id-1: + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-update-dist-table-id-1: + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1'); + +step s3-select-distributed-waiting-queries: + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; + +blocked_statement |current_statement_in_blocking_process |waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port 
+--------------------------------------------------------------------- +UPDATE tt1 SET value_1 = 4 WHERE user_id = 1|UPDATE tt1 SET value_1 = 4 WHERE user_id = 1|localhost |localhost | 57638| 57637 +(1 row) + +step s1-commit-worker: + SELECT run_commands_on_session_level_connection_to_node('COMMIT'); + +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-update-dist-table-id-1: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s1-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); + +stop_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +restore_isolation_tester_func +--------------------------------------------------------------------- + +(1 row) + + +starting permutation: s1-begin s1-update-ref-table-from-coordinator s2-start-session-level-connection s2-update-ref-table s3-select-distributed-waiting-queries s1-commit s2-stop-connection +step s1-begin: + BEGIN; + +step s1-update-ref-table-from-coordinator: + UPDATE ref_table SET value_1 = 15; + +step s2-start-session-level-connection: + SELECT start_session_level_connection_to_node('localhost', 57638); + +start_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-update-ref-table: + SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); + +step s3-select-distributed-waiting-queries: + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, 
blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; + +blocked_statement |current_statement_in_blocking_process|waiting_node_name|blocking_node_name|waiting_node_port|blocking_node_port +--------------------------------------------------------------------- +UPDATE ref_table SET value_1 = 12 WHERE user_id = 1| + UPDATE ref_table SET value_1 = 15; +|localhost |coordinator_host | 57638| 57636 +(1 row) + +step s1-commit: + COMMIT; + +step s2-update-ref-table: <... completed> +run_commands_on_session_level_connection_to_node +--------------------------------------------------------------------- + +(1 row) + +step s2-stop-connection: + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_rebalancer_deferred_drop.out b/src/test/regress/expected/isolation_rebalancer_deferred_drop.out index d89a71ece..6963e9122 100644 --- a/src/test/regress/expected/isolation_rebalancer_deferred_drop.out +++ b/src/test/regress/expected/isolation_rebalancer_deferred_drop.out @@ -96,7 +96,7 @@ step s1-commit: COMMIT; step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- @@ -148,7 +148,8 @@ step s1-move-placement-back: SET client_min_messages to NOTICE; SHOW log_error_verbosity; SELECT master_move_shard_placement((SELECT * FROM selected_shard), 'localhost', 57638, 'localhost', 57637); - + +step s1-move-placement-back: <... 
completed> log_error_verbosity --------------------------------------------------------------------- verbose @@ -159,7 +160,7 @@ step s1-commit: COMMIT; step s2-stop-connection: - SELECT stop_session_level_connection_to_node(); + SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node --------------------------------------------------------------------- diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index e83e71919..2d49f8586 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -101,13 +101,14 @@ step s2-view-worker: FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND - query NOT ILIKE '%dump_local_wait_edges%' + query NOT ILIKE '%dump_local_%' AND + query NOT ILIKE '%citus_internal_local_blocked_processes%' ORDER BY query, query_hostport DESC; query |query_hostname|query_hostport|distributed_query_host_name|distributed_query_host_port|state |wait_event_type|wait_event|usename |datname --------------------------------------------------------------------- -UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression -UPDATE public.ref_table_1500767 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500777 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57638|coordinator_host | 57636|idle in transaction|Client |ClientRead|postgres|regression +UPDATE public.ref_table_1500777 ref_table SET a = (a OPERATOR(pg_catalog.+) 1)|localhost | 57637|coordinator_host | 57636|idle in 
transaction|Client |ClientRead|postgres|regression (2 rows) step s2-end: diff --git a/src/test/regress/expected/local_shard_copy.out b/src/test/regress/expected/local_shard_copy.out index ec9828a04..33ef9f7df 100644 --- a/src/test/regress/expected/local_shard_copy.out +++ b/src/test/regress/expected/local_shard_copy.out @@ -62,6 +62,7 @@ SET citus.log_local_commands TO ON; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -84,6 +85,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- pick some example values that reside on the shards locally and remote -- distribution key values of 1,6, 500 and 701 are LOCAL to shards, -- we'll use these values in the tests diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index 51daa4046..781a9c86c 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -94,6 +94,7 @@ SET search_path TO local_shard_execution; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -116,6 +117,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- test case for issue #3556 SET citus.log_intermediate_results TO TRUE; 
SET client_min_messages TO DEBUG1; @@ -801,6 +803,7 @@ BEGIN; ERROR: VACUUM cannot run inside a transaction block ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; BEGIN @@ -896,6 +899,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); NOTICE: executing the command locally: INSERT INTO local_shard_execution.distributed_table_1470001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text CONTEXT: SQL statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'" diff --git a/src/test/regress/expected/local_shard_execution_replicated.out b/src/test/regress/expected/local_shard_execution_replicated.out index 9d880f44b..731c825c3 100644 --- a/src/test/regress/expected/local_shard_execution_replicated.out +++ b/src/test/regress/expected/local_shard_execution_replicated.out @@ -770,6 +770,7 @@ NOTICE: executing the command locally: INSERT INTO local_shard_execution_replic ERROR: VACUUM cannot run inside a transaction block ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; BEGIN @@ -865,6 +866,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); NOTICE: executing the command locally: INSERT INTO local_shard_execution_replicated.distributed_table_1500001 AS citus_table_alias (key, value, age) VALUES (1, '11'::text, 21) ON CONFLICT(key) DO UPDATE SET value = '29'::text CONTEXT: SQL 
statement "INSERT INTO distributed_table VALUES (1, '11',21) ON CONFLICT(key) DO UPDATE SET value = '29'" diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 24aa7302f..9651fcc63 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -987,6 +987,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ERROR: Node with group id 123123123 for shard placement xxxxx does not exist ROLLBACK; -- create a volatile function that returns the local node id +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_node_id() RETURNS INT AS $$ DECLARE localGroupId int; @@ -999,6 +1000,7 @@ BEGIN nodeport = 57637 AND nodename = 'localhost' AND isactive AND nodecluster = 'default'; RETURN localGroupId; END; $$ language plpgsql; +RESET citus.enable_metadata_sync; -- fails because we ingest more placements for the same shards to the same worker node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); diff --git a/src/test/regress/expected/multi_create_table.out b/src/test/regress/expected/multi_create_table.out index a09f1fbcc..4ec8a27da 100644 --- a/src/test/regress/expected/multi_create_table.out +++ b/src/test/regress/expected/multi_create_table.out @@ -7,6 +7,7 @@ -- reference and hash-distributed version of orders, customer and part tables. 
SET citus.next_shard_id TO 360000; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -23,6 +24,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, diff --git a/src/test/regress/expected/multi_deparse_function.out b/src/test/regress/expected/multi_deparse_function.out index b85078258..656c89c40 100644 --- a/src/test/regress/expected/multi_deparse_function.out +++ b/src/test/regress/expected/multi_deparse_function.out @@ -689,17 +689,7 @@ SELECT create_distributed_function('func_custom_param(intpair)'); (1 row) RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_custom_param RENAME TO func_with_custom_param; -$cmd$); -INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_custom_param(function_tests.intpair) RENAME TO func_with_custom_param; -CONTEXT: PL/pgSQL function deparse_and_run_on_workers(text) line XX at RAISE - deparse_and_run_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") -(2 rows) - -- a function that returns TABLE CREATE FUNCTION func_returns_table(IN count INT) RETURNS TABLE (x INT, y INT) @@ -713,17 +703,7 @@ SELECT create_distributed_function('func_returns_table(INT)'); (1 row) RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_returns_table ROWS 100; -$cmd$); -INFO: Propagating deparsed query: ALTER FUNCTION function_tests.func_returns_table(integer) ROWS 100.000000; -CONTEXT: PL/pgSQL function 
deparse_and_run_on_workers(text) line XX at RAISE - deparse_and_run_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"ALTER FUNCTION") - (localhost,57638,t,"ALTER FUNCTION") -(2 rows) - -- clear objects SET client_min_messages TO WARNING; -- suppress cascading objects dropping DROP SCHEMA "CiTuS.TeeN" CASCADE; diff --git a/src/test/regress/expected/multi_distribution_metadata.out b/src/test/regress/expected/multi_distribution_metadata.out index ba004974b..c76b45a5b 100644 --- a/src/test/regress/expected/multi_distribution_metadata.out +++ b/src/test/regress/expected/multi_distribution_metadata.out @@ -220,7 +220,7 @@ SELECT column_to_column_name('pg_dist_node'::regclass,'{FROMEXPR :fromlist ({RAN ERROR: not a valid column -- test column_name_to_column with illegal arguments SELECT column_name_to_column(1204127312,''); -ERROR: could not open relation with OID 1204127312 +ERROR: relation does not exist SELECT column_name_to_column('customers','notacolumn'); ERROR: column "notacolumn" of relation "customers" does not exist -- make one huge shard and manually inspect shard row diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 70dc4c2a0..e5cbcd994 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -77,6 +77,8 @@ END $func$ LANGUAGE plpgsql; CREATE SCHEMA test; :create_function_test_maintenance_worker +WARNING: Citus can't distribute functions having dependency on unsupported object of type "view" +DETAIL: Function will be created only locally -- check maintenance daemon is started SELECT datname, current_database(), usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') @@ -107,6 +109,7 @@ DROP EXTENSION citus; -- these tests switch between citus versions and call ddl's that require pg_dist_object to be created SET citus.enable_metadata_sync TO 'false'; SET 
citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; ALTER EXTENSION citus UPDATE TO '8.0-2'; ALTER EXTENSION citus UPDATE TO '8.0-3'; @@ -425,20 +428,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE ALTER EXTENSION citus UPDATE TO '9.4-2'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -466,20 +469,20 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '9.4-1'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 
row) @@ -575,20 +578,20 @@ SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDE ALTER EXTENSION citus UPDATE TO '9.5-2'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -616,20 +619,20 @@ SELECT * FROM multi_extension.print_extension_changes(); ALTER EXTENSION citus UPDATE TO '9.5-1'; -- should see the old source code SELECT prosrc FROM pg_proc WHERE proname = 'master_update_table_statistics' ORDER BY 1; - prosrc + prosrc --------------------------------------------------------------------- - + - DECLARE + - colocated_tables regclass[]; + - BEGIN + - SELECT get_colocated_table_array(relation) INTO colocated_tables;+ - PERFORM + - master_update_shard_statistics(shardid) + - FROM + - pg_dist_shard + - WHERE + - logicalrelid = ANY (colocated_tables); + - END; + + + + DECLARE + + colocated_tables regclass[]; + + BEGIN + + SELECT get_colocated_table_array(relation) INTO colocated_tables;+ + PERFORM + + master_update_shard_statistics(shardid) + + FROM + + pg_dist_shard + + WHERE + + logicalrelid = ANY (colocated_tables); + + END; + (1 row) @@ -757,6 +760,7 @@ SELECT * FROM multi_extension.print_extension_changes(); \set VERBOSITY terse CREATE TABLE columnar_table(a INT, b INT) USING columnar; SET citus.enable_version_checks TO ON; +SET 
columnar.enable_version_checks TO ON; -- all should throw an error due to version mismatch VACUUM FULL columnar_table; ERROR: loaded Citus library version differs from installed extension version @@ -785,6 +789,7 @@ CREATE TABLE new_columnar_table (a int) USING columnar; ERROR: loaded Citus library version differs from installed extension version -- do cleanup for the rest of the tests SET citus.enable_version_checks TO OFF; +SET columnar.enable_version_checks TO OFF; DROP TABLE columnar_table; RESET columnar.enable_custom_scan; \set VERBOSITY default @@ -996,7 +1001,7 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 11.0-1 ALTER EXTENSION citus UPDATE TO '11.0-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object + previous_object | current_object --------------------------------------------------------------------- function citus_disable_node(text,integer) void | function create_distributed_function(regprocedure,text,text) void | @@ -1007,13 +1012,18 @@ SELECT * FROM multi_extension.print_extension_changes(); | function citus_check_connection_to_node(text,integer) boolean | function citus_disable_node(text,integer,boolean) void | function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) void + | function citus_internal_global_blocked_processes() SETOF record + | function citus_internal_local_blocked_processes() SETOF record | function citus_run_local_command(text) void | function citus_shard_indexes_on_worker() SETOF record | function citus_shards_on_worker() SETOF record | function create_distributed_function(regprocedure,text,text,boolean) void + | function pg_cancel_backend(bigint) boolean + | function pg_terminate_backend(bigint,bigint) boolean + | function worker_create_or_replace_object(text[]) boolean | function worker_drop_sequence_dependency(text) void | function worker_drop_shell_table(text) void -(15 rows) +(20 rows) DROP TABLE 
multi_extension.prev_objects, multi_extension.extension_diff; -- show running version @@ -1040,6 +1050,7 @@ ORDER BY 1, 2; -- see incompatible version errors out RESET citus.enable_version_checks; +RESET columnar.enable_version_checks; DROP EXTENSION citus; CREATE EXTENSION citus VERSION '8.0-1'; ERROR: specified version incompatible with loaded Citus library @@ -1047,8 +1058,10 @@ DETAIL: Loaded library requires 11.0, but 8.0-1 was specified. HINT: If a newer library is present, restart the database and try the command again. -- Test non-distributed queries work even in version mismatch SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.1-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- Test CREATE TABLE CREATE TABLE version_mismatch_table(column1 int); -- Test COPY @@ -1098,15 +1111,18 @@ $function$; ERROR: cannot change return type of existing function HINT: Use DROP FUNCTION relation_is_a_known_shard(regclass) first. SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; -- This will fail because of previous function declaration ALTER EXTENSION citus UPDATE TO '8.1-1'; NOTICE: version "8.1-1" of extension "citus" is already installed -- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass); ERROR: cannot drop function relation_is_a_known_shard(regclass) because extension citus requires it HINT: You can drop extension citus instead. 
SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; ALTER EXTENSION citus UPDATE TO '8.1-1'; NOTICE: version "8.1-1" of extension "citus" is already installed -- Test updating to the latest version without specifying the version number @@ -1119,8 +1135,10 @@ CREATE EXTENSION citus; \c - - - :worker_1_port DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- during ALTER EXTENSION, we should invalidate the cache ALTER EXTENSION citus UPDATE; -- if cache is invalidated succesfull, this \d should work without any problem @@ -1197,6 +1215,8 @@ HINT: You can manually create a database and its extensions on workers. CREATE EXTENSION citus; CREATE SCHEMA test; :create_function_test_maintenance_worker +WARNING: Citus can't distribute functions having dependency on unsupported object of type "view" +DETAIL: Function will be created only locally -- see that the daemon started SELECT datname, current_database(), usename, (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') diff --git a/src/test/regress/expected/multi_function_in_join.out b/src/test/regress/expected/multi_function_in_join.out index c80b26c57..7d62e286b 100644 --- a/src/test/regress/expected/multi_function_in_join.out +++ b/src/test/regress/expected/multi_function_in_join.out @@ -39,14 +39,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL; -SELECT create_distributed_function('add(integer,integer)'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; id | data | sum --------------------------------------------------------------------- @@ -60,7 +54,10 @@ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id id | data | val @@ -69,6 +66,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta (1 row) -- a function that returns a set of integers +-- Block distributing function as we have tests below to test it locally +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION next_k_integers(IN first_value INTEGER, IN k INTEGER DEFAULT 3, OUT result INTEGER) @@ -77,6 +76,7 @@ BEGIN RETURN QUERY SELECT x FROM generate_series(first_value, first_value+k-1) f(x); END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT * FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result) ORDER BY id ASC; @@ -93,7 +93,10 @@ CREATE FUNCTION get_set_of_records() 
RETURNS SETOF RECORD AS $cmd$ SELECT x, x+1 FROM generate_series(0,4) f(x) $cmd$ LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id id | data | x | y @@ -108,7 +111,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) f1 | f2 @@ -119,7 +125,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FR -- a stable function CREATE OR REPLACE FUNCTION the_minimum_id() RETURNS INTEGER STABLE AS 'SELECT min(id) FROM table1' LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) id | data | min_id @@ -180,7 +189,10 @@ begin return result; end; $$ language plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = data) ORDER BY 1,2,3,4; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum id | data | minimum | maximum @@ -198,6 +210,7 @@ SET client_min_messages TO ERROR; -- function joins in CTE results can create lateral joins that are not supported -- we execute the query within a function to consolidate the error messages -- between different executors +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION raise_failed_execution_func_join(query text) RETURNS void AS $$ BEGIN EXECUTE query; @@ -209,6 +222,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ WITH one_row AS ( SELECT * FROM table1 WHERE id=52 @@ -219,8 +233,10 @@ SELECT raise_failed_execution_func_join($$ $$); ERROR: Task failed to execute -- a user-defined immutable function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION the_answer_to_life() RETURNS INTEGER IMMUTABLE AS 'SELECT 42' LANGUAGE SQL; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ SELECT * FROM table1 JOIN the_answer_to_life() the_answer ON (id = the_answer); $$); diff --git 
a/src/test/regress/expected/multi_function_in_join_0.out b/src/test/regress/expected/multi_function_in_join_0.out index 5b818855b..5f2bd70c7 100644 --- a/src/test/regress/expected/multi_function_in_join_0.out +++ b/src/test/regress/expected/multi_function_in_join_0.out @@ -39,14 +39,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL; -SELECT create_distributed_function('add(integer,integer)'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands - create_distributed_function ---------------------------------------------------------------------- - -(1 row) - SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; DEBUG: generating subplan XXX_1 for subquery SELECT sum FROM functions_in_joins.add(3, 5) sum(sum) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, sum.sum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.sum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(sum integer)) sum ON ((table1.id OPERATOR(pg_catalog.=) sum.sum))) ORDER BY table1.id @@ -62,7 +56,10 @@ BEGIN RETURN i + 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT val FROM functions_in_joins.increment(2) val(val) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, val.val FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.val FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(val integer)) val ON ((table1.id OPERATOR(pg_catalog.=) val.val))) ORDER BY table1.id id | data | val @@ -71,6 +68,8 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta (1 row) -- a function that returns a set of integers +-- Block distributing function as we have tests below to test it locally +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION next_k_integers(IN first_value INTEGER, IN k INTEGER DEFAULT 3, OUT result INTEGER) @@ -79,6 +78,7 @@ BEGIN RETURN QUERY SELECT x FROM generate_series(first_value, first_value+k-1) f(x); END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT * FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result) ORDER BY id ASC; @@ -95,7 +95,10 @@ CREATE FUNCTION get_set_of_records() RETURNS SETOF RECORD AS $cmd$ SELECT x, x+1 FROM generate_series(0,4) f(x) $cmd$ LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN get_set_of_records() AS t2(x int, y int) ON (id = x) ORDER BY id ASC; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT x, y FROM functions_in_joins.get_set_of_records() t2(x integer, y integer) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, t2.x, t2.y FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.x, intermediate_result.y FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(x integer, y integer)) t2 ON ((table1.id OPERATOR(pg_catalog.=) t2.x))) ORDER BY table1.id id | data | x | y @@ -110,7 +113,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, ta CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text) AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$ LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT f.* FROM table1 t JOIN dup(32) f ON (f1 = id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT f1, f2 FROM functions_in_joins.dup(32) f(f1, f2) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FROM (functions_in_joins.table1 t JOIN (SELECT intermediate_result.f1, intermediate_result.f2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(f1 integer, f2 text)) f ON ((f.f1 OPERATOR(pg_catalog.=) t.id))) f1 | f2 @@ -121,7 +127,10 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT f.f1, f.f2 FR -- a stable function CREATE OR REPLACE FUNCTION the_minimum_id() RETURNS INTEGER STABLE AS 'SELECT min(id) FROM table1' LANGUAGE SQL; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN the_minimum_id() min_id ON (id = min_id); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT min_id FROM functions_in_joins.the_minimum_id() min_id(min_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, min_id.min_id FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.min_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(min_id integer)) min_id ON ((table1.id OPERATOR(pg_catalog.=) min_id.min_id))) id | data | min_id @@ -182,7 +191,10 @@ begin return result; end; $$ language plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM table1 JOIN max_and_min() m ON (m.maximum = data OR m.minimum = data) ORDER BY 1,2,3,4; +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT minimum, maximum FROM functions_in_joins.max_and_min() m(minimum, maximum) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT table1.id, table1.data, m.minimum, m.maximum FROM (functions_in_joins.table1 JOIN (SELECT intermediate_result.minimum, intermediate_result.maximum FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(minimum integer, maximum integer)) m ON (((m.maximum OPERATOR(pg_catalog.=) table1.data) OR (m.minimum OPERATOR(pg_catalog.=) table1.data)))) ORDER BY table1.id, table1.data, m.minimum, m.maximum id | data | minimum | maximum @@ -200,6 +212,7 @@ SET client_min_messages TO ERROR; -- function joins in CTE results can create lateral joins that are not supported -- we execute the query within a function to consolidate the error messages -- between different executors +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION raise_failed_execution_func_join(query text) RETURNS void AS $$ BEGIN EXECUTE query; @@ -211,6 +224,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ WITH one_row AS ( SELECT * FROM table1 WHERE id=52 @@ -221,8 +235,10 @@ SELECT raise_failed_execution_func_join($$ $$); ERROR: Task failed to execute -- a user-defined immutable function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION the_answer_to_life() RETURNS INTEGER IMMUTABLE AS 'SELECT 42' LANGUAGE SQL; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ SELECT * FROM table1 JOIN the_answer_to_life() the_answer ON (id = the_answer); $$); diff --git 
a/src/test/regress/expected/multi_insert_select.out b/src/test/regress/expected/multi_insert_select.out index 99c5e8e56..c7679d02e 100644 --- a/src/test/regress/expected/multi_insert_select.out +++ b/src/test/regress/expected/multi_insert_select.out @@ -127,6 +127,7 @@ WHERE user_id < 0; NOTICE: evaluating on master -- make sure we don't evaluate stable functions with column arguments +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION evaluate_on_master(x int) RETURNS int LANGUAGE plpgsql STABLE AS $function$ @@ -135,6 +136,7 @@ BEGIN RETURN x; END; $function$; +RESET citus.enable_metadata_sync; INSERT INTO raw_events_second (user_id, value_1) SELECT user_id, evaluate_on_master(value_1) diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index 5680b73c3..be923eee3 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -26,10 +26,12 @@ ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION activate_node_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION activate_node_snapshot() IS 'commands to activate node snapshot'; -- Show that none of the existing tables are qualified to be MX tables @@ -89,12 +91,14 @@ SELECT unnest(activate_node_snapshot()) order by 1; (27 rows) -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) RETURNS void LANGUAGE C STRICT AS 'citus', 
$$master_create_distributed_table$$; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) diff --git a/src/test/regress/expected/multi_modifications.out b/src/test/regress/expected/multi_modifications.out index c6323c793..5b5764593 100644 --- a/src/test/regress/expected/multi_modifications.out +++ b/src/test/regress/expected/multi_modifications.out @@ -467,13 +467,6 @@ UPDATE limit_orders SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246; CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :worker_1_port -CREATE FUNCTION immutable_append(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :worker_2_port -CREATE FUNCTION immutable_append(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :master_port -- immutable function calls with vars are also allowed UPDATE limit_orders SET array_of_values = immutable_append(array_of_values, 2) WHERE id = 246; diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index 226529040..47b113378 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -329,22 +329,15 @@ SELECT count(*) FROM pg_dist_transaction; (1 row) -- create a check function -SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ +CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ BEGIN IF (NEW.id > 30) THEN - RAISE ''illegal value''; + RAISE 'illegal value'; END IF; RETURN NEW; END; -$rli$ LANGUAGE plpgsql;') -ORDER BY 
nodeport; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | CREATE FUNCTION - localhost | 57638 | t | CREATE FUNCTION -(2 rows) - +$rli$ LANGUAGE plpgsql; -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') ORDER BY nodeport, shardid; @@ -418,14 +411,7 @@ ORDER BY nodeport, shardid; localhost | 57638 | 1200001 | t | DROP TRIGGER (4 rows) -SELECT * FROM run_command_on_workers('drop function reject_large_id()') -ORDER BY nodeport; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | DROP FUNCTION - localhost | 57638 | t | DROP FUNCTION -(2 rows) - +DROP FUNCTION reject_large_id(); -- ALTER and copy are compatible BEGIN; ALTER TABLE labs ADD COLUMN motto text; @@ -522,6 +508,7 @@ AND s.logicalrelid = 'objects'::regclass; -- create trigger on one worker to reject certain values \c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -531,6 +518,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 DEFERRABLE INITIALLY IMMEDIATE @@ -573,6 +561,7 @@ DELETE FROM objects; -- there cannot be errors on different shards at different times -- because the first failure will fail the whole transaction \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -582,6 +571,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET 
citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 DEFERRABLE INITIALLY IMMEDIATE @@ -850,6 +840,7 @@ SELECT * FROM reference_modifying_xacts; -- lets fail on of the workers at before the commit time \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 999) THEN @@ -859,6 +850,7 @@ CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY IMMEDIATE @@ -938,6 +930,7 @@ INSERT INTO hash_modifying_xacts VALUES (2, 2); ABORT; -- lets fail one of the workers before COMMIT time for the hash table \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 997) THEN @@ -947,6 +940,7 @@ CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 DEFERRABLE INITIALLY IMMEDIATE diff --git a/src/test/regress/expected/multi_multiuser.out b/src/test/regress/expected/multi_multiuser.out index 2c7be4c90..0e25a38d2 100644 --- a/src/test/regress/expected/multi_multiuser.out +++ b/src/test/regress/expected/multi_multiuser.out @@ -622,10 +622,12 @@ ERROR: permission denied for function worker_cleanup_job_schema_cache RESET ROLE; -- to test access to files created during repartition we will create some on worker 1 \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET ROLE full_access; SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 
'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); worker_hash_partition_table @@ -636,10 +638,12 @@ SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS RESET ROLE; -- all attempts for transfer are initiated from other workers \c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- super user should not be able to copy files created by a user SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); WARNING: could not open file "base/pgsql_job_cache/job_0042/task_000001/p_00001.xxxx": No such file or directory diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index 045563d6f..ff6debaf9 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -139,6 +139,8 @@ BEGIN INSERT INTO mx_add_coordinator.ref(groupid) VALUES (gid); END; $$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('my_group_id()', colocate_with := 'ref'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands diff --git a/src/test/regress/expected/multi_mx_call.out b/src/test/regress/expected/multi_mx_call.out index 37c989885..5d440e67b 100644 --- a/src/test/regress/expected/multi_mx_call.out +++ b/src/test/regress/expected/multi_mx_call.out @@ -402,6 +402,7 @@ select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass -- Test that we handle transactional constructs correctly inside a procedure -- that is routed to the workers. +SET citus.enable_metadata_sync TO OFF; CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO multi_mx_call.mx_call_dist_table_1 VALUES (x, -1), (x+1, 4); @@ -411,6 +412,7 @@ BEGIN -- Now do the final update! UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val-1 WHERE id >= x; END;$$; +RESET citus.enable_metadata_sync; -- before distribution ... CALL multi_mx_call.mx_call_proc_tx(10); -- after distribution ... @@ -486,6 +488,8 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -552,6 +556,8 @@ SET client_min_messages TO DEBUG1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands diff --git a/src/test/regress/expected/multi_mx_call_0.out b/src/test/regress/expected/multi_mx_call_0.out index 6f33d4dae..474d3a637 100644 --- a/src/test/regress/expected/multi_mx_call_0.out +++ b/src/test/regress/expected/multi_mx_call_0.out @@ -402,6 +402,7 @@ select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass -- Test that we handle transactional constructs correctly inside a procedure -- that is routed to the workers. +SET citus.enable_metadata_sync TO OFF; CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO multi_mx_call.mx_call_dist_table_1 VALUES (x, -1), (x+1, 4); @@ -411,6 +412,7 @@ BEGIN -- Now do the final update! UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val-1 WHERE id >= x; END;$$; +RESET citus.enable_metadata_sync; -- before distribution ... CALL multi_mx_call.mx_call_proc_tx(10); -- after distribution ... @@ -486,6 +488,8 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_proc_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -552,6 +556,8 @@ SET client_min_messages TO DEBUG1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index ef67a626e..6036bd325 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -63,30 +63,7 @@ CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
--- create UDFs in worker node -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, @@ -98,30 +75,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- now create required stuff in the worker 2 \c - - - :worker_2_port --- create UDF -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, diff --git a/src/test/regress/expected/multi_mx_explain.out b/src/test/regress/expected/multi_mx_explain.out index 2c58dd003..1c585a027 100644 --- a/src/test/regress/expected/multi_mx_explain.out +++ b/src/test/regress/expected/multi_mx_explain.out @@ -13,6 +13,7 @@ VACUUM ANALYZE customer_mx; VACUUM ANALYZE supplier_mx; \c - - - :worker_1_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS 
$BODY$ @@ -36,6 +37,7 @@ END; $BODY$ LANGUAGE plpgsql; \c - - - :worker_2_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ @@ -57,6 +59,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx diff --git a/src/test/regress/expected/multi_mx_function_call_delegation.out b/src/test/regress/expected/multi_mx_function_call_delegation.out index 954c23420..552af7bf5 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation.out @@ -409,8 +409,11 @@ BEGIN WHERE id >= x ORDER BY 1, 2; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- before distribution ... select mx_call_func_tbl(10); +DEBUG: function does not have co-located tables mx_call_func_tbl --------------------------------------------------------------------- (10,-1) @@ -441,6 +444,8 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -593,6 +598,8 @@ SET citus.shard_replication_factor = 1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands diff --git a/src/test/regress/expected/multi_mx_function_call_delegation_0.out b/src/test/regress/expected/multi_mx_function_call_delegation_0.out index 66c0f89d2..cab2f6394 100644 --- a/src/test/regress/expected/multi_mx_function_call_delegation_0.out +++ b/src/test/regress/expected/multi_mx_function_call_delegation_0.out @@ -409,8 +409,11 @@ BEGIN WHERE id >= x ORDER BY 1, 2; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- before distribution ... select mx_call_func_tbl(10); +DEBUG: function does not have co-located tables mx_call_func_tbl --------------------------------------------------------------------- (10,-1) @@ -441,6 +444,8 @@ BEGIN RAISE WARNING 'warning'; RAISE EXCEPTION 'error'; END;$$; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands select create_distributed_function('mx_call_func_raise(int)', '$1', 'mx_call_dist_table_1'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands @@ -593,6 +598,8 @@ SET citus.shard_replication_factor = 1; -- CREATE FUNCTION mx_call_add(int, int) RETURNS int AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT create_distributed_function('mx_call_add(int,int)', '$1'); DEBUG: switching to sequential query execution mode DETAIL: A command for a distributed function is run. 
To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands diff --git a/src/test/regress/expected/multi_mx_metadata.out b/src/test/regress/expected/multi_mx_metadata.out index dc2365dda..a35b8edb1 100644 --- a/src/test/regress/expected/multi_mx_metadata.out +++ b/src/test/regress/expected/multi_mx_metadata.out @@ -344,16 +344,6 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx (3 rows) \c - no_access_mx - :worker_1_port --- see the comment in the top of the file -CREATE OR REPLACE FUNCTION raise_failed_aclcheck(query text) RETURNS void AS $$ -BEGIN - EXECUTE query; - EXCEPTION WHEN OTHERS THEN - IF SQLERRM LIKE 'must be owner of%' THEN - RAISE 'must be owner of the object'; - END IF; -END; -$$LANGUAGE plpgsql; SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); diff --git a/src/test/regress/expected/multi_mx_modifications.out b/src/test/regress/expected/multi_mx_modifications.out index c290c4e5a..276766c30 100644 --- a/src/test/regress/expected/multi_mx_modifications.out +++ b/src/test/regress/expected/multi_mx_modifications.out @@ -266,6 +266,7 @@ UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246 -- immutable function calls with vars are also allowed UPDATE limit_orders_mx SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION stable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; @@ -282,6 +283,7 @@ SELECT array_of_values FROM limit_orders_mx WHERE id = 246; -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT; +RESET citus.enable_metadata_sync; UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; 
ERROR: null value in column "bidder_id" violates not-null constraint SELECT array_of_values FROM limit_orders_mx WHERE id = 246; diff --git a/src/test/regress/expected/multi_mx_modifying_xacts.out b/src/test/regress/expected/multi_mx_modifying_xacts.out index 8c08673b2..e486b8b1b 100644 --- a/src/test/regress/expected/multi_mx_modifying_xacts.out +++ b/src/test/regress/expected/multi_mx_modifying_xacts.out @@ -227,6 +227,7 @@ SELECT * FROM objects_mx WHERE id = 1; -- create trigger on one worker to reject certain values \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -236,6 +237,7 @@ CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 DEFERRABLE INITIALLY IMMEDIATE diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index d46f49caf..a4f49f320 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -829,10 +829,12 @@ SELECT master_update_node(nodeid, 'localhost', 12345) FROM pg_dist_node; (1 row) +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION trigger_metadata_sync() RETURNS void LANGUAGE C STRICT AS 'citus'; +RESET citus.enable_metadata_sync; SELECT trigger_metadata_sync(); trigger_metadata_sync --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out index 114e55a0f..c9357e954 100644 --- a/src/test/regress/expected/multi_mx_repartition_udt_prepare.out +++ b/src/test/regress/expected/multi_mx_repartition_udt_prepare.out @@ -52,13 +52,7 @@ CREATE TABLE repartition_udt_other ( -- so that the OID is 
off. \c - - - :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -68,13 +62,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -87,13 +74,7 @@ FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - - :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -103,13 +84,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index 4dcbac57e..d006b4bb8 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -1363,6 +1363,7 @@ DEBUG: query has a single distribution column value: 1 (5 rows) -- queries inside plpgsql functions could be router plannable +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; @@ -1418,6 +1419,7 @@ PL/pgSQL function author_articles_id_word_count() line XX at RETURN QUERY 41 | 11814 (5 rows) +RESET citus.enable_metadata_sync; -- materialized views can be created for router plannable queries CREATE MATERIALIZED VIEW mv_articles_hash_mx AS SELECT * FROM articles_hash_mx WHERE author_id = 1; diff --git a/src/test/regress/expected/multi_mx_schema_support.out b/src/test/regress/expected/multi_mx_schema_support.out index 71dfde3fd..6c7173c1f 100644 --- a/src/test/regress/expected/multi_mx_schema_support.out +++ b/src/test/regress/expected/multi_mx_schema_support.out @@ -515,12 +515,30 @@ SELECT table_schema AS "Shards' Schema" mx_new_schema (1 row) +-- check that we can drop a user-defined schema from workers +SET citus.enable_ddl_propagation TO OFF; +CREATE SCHEMA localschema; +RESET citus.enable_ddl_propagation; +DROP SCHEMA localschema; \c - - - :master_port SELECT * FROM mx_new_schema.table_set_schema; id --------------------------------------------------------------------- (0 rows) +-- verify local schema does not exist 
on the worker +-- worker errors out as "schema does not exist" +SET citus.enable_ddl_propagation TO OFF; +CREATE SCHEMA localschema; +-- should error out +SELECT run_command_on_workers($$DROP SCHEMA localschema;$$); + run_command_on_workers +--------------------------------------------------------------------- + (localhost,57637,f,"ERROR: schema ""localschema"" does not exist") + (localhost,57638,f,"ERROR: schema ""localschema"" does not exist") +(2 rows) + +RESET citus.enable_ddl_propagation; DROP SCHEMA mx_old_schema CASCADE; DROP SCHEMA mx_new_schema CASCADE; NOTICE: drop cascades to table mx_new_schema.table_set_schema diff --git a/src/test/regress/expected/multi_name_lengths.out b/src/test/regress/expected/multi_name_lengths.out index e100ba7d8..e3cbca911 100644 --- a/src/test/regress/expected/multi_name_lengths.out +++ b/src/test/regress/expected/multi_name_lengths.out @@ -4,6 +4,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000; SET citus.shard_count TO 2; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -20,6 +21,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- Verify that a table name > 56 characters gets hashed properly. CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, diff --git a/src/test/regress/expected/multi_prepare_sql.out b/src/test/regress/expected/multi_prepare_sql.out index 4f30bc716..6681f4112 100644 --- a/src/test/regress/expected/multi_prepare_sql.out +++ b/src/test/regress/expected/multi_prepare_sql.out @@ -1132,10 +1132,6 @@ DROP TABLE http_request; -- (re-)planning, but not when executing. 
-- first create helper function CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :master_port -- test table CREATE TABLE test_table (test_id integer NOT NULL, data text); diff --git a/src/test/regress/expected/multi_repartition_udt.out b/src/test/regress/expected/multi_repartition_udt.out index c37bbba02..ee6ac8800 100644 --- a/src/test/regress/expected/multi_repartition_udt.out +++ b/src/test/regress/expected/multi_repartition_udt.out @@ -53,13 +53,7 @@ CREATE TABLE repartition_udt_other ( -- so that the OID is off. \c - - :public_worker_1_host :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -69,13 +63,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -88,13 +75,7 @@ FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - :public_worker_2_host :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -104,13 +85,6 @@ CREATE OPERATOR = ( ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/expected/multi_repartitioned_subquery_udf.out b/src/test/regress/expected/multi_repartitioned_subquery_udf.out index 0048bb7eb..f6017d2ea 100644 --- a/src/test/regress/expected/multi_repartitioned_subquery_udf.out +++ b/src/test/regress/expected/multi_repartitioned_subquery_udf.out @@ -13,28 +13,6 @@ LANGUAGE sql IMMUTABLE AS $_$ ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -\c - - :public_worker_1_host :worker_1_port -DROP FUNCTION IF EXISTS median(double precision[]); -NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; -\c - - :public_worker_2_host :worker_2_port -DROP FUNCTION IF EXISTS median(double precision[]); -NOTICE: function median(pg_catalog.float8[]) does not exist, skipping -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; --- Run query on master -\c - - :master_host :master_port SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; diff --git a/src/test/regress/expected/multi_router_planner.out 
b/src/test/regress/expected/multi_router_planner.out index 405659544..cc503f766 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -1592,10 +1592,13 @@ BEGIN RETURN QUERY SELECT 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT 1 FROM authors_reference r JOIN ( SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid ) num_db ON (r.id = num_db.datid) LIMIT 1; DEBUG: found no worker with all shard placements +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM public.number1() s(datid) DEBUG: Creating router plan DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid))) @@ -1610,6 +1613,7 @@ CREATE VIEW num_db AS SELECT s.datid FROM number1() s LEFT JOIN pg_database d ON s.datid = d.oid; SELECT 1 FROM authors_reference r JOIN num_db ON (r.id = num_db.datid) LIMIT 1; DEBUG: found no worker with all shard placements +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT datid FROM public.number1() s(datid) DEBUG: Creating router plan DEBUG: generating subplan XXX_2 for subquery SELECT s.datid FROM ((SELECT intermediate_result.datid FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(datid integer)) s LEFT JOIN pg_database d ON (((s.datid)::oid OPERATOR(pg_catalog.=) d.oid))) @@ -2014,6 +2018,8 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode 
+DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SET client_min_messages TO ERROR; \set VERBOSITY terse SELECT raise_failed_execution_router($$ @@ -2307,7 +2313,10 @@ BEGIN return max_id; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables DEBUG: Creating router plan CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" @@ -2332,7 +2341,10 @@ BEGIN END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands SELECT * FROM author_articles_id_word_count() ORDER BY 1; +DEBUG: function does not have co-located tables DEBUG: Creating router plan CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash ah diff --git a/src/test/regress/expected/multi_router_planner_fast_path.out b/src/test/regress/expected/multi_router_planner_fast_path.out index eb4a91f7e..8fb8917ab 100644 --- a/src/test/regress/expected/multi_router_planner_fast_path.out +++ b/src/test/regress/expected/multi_router_planner_fast_path.out @@ -1225,6 +1225,7 @@ DEBUG: query has a single distribution column value: 1 (0 rows) -- create a dummy function to be used in filtering +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION someDummyFunction(regclass) RETURNS text AS $$ @@ -1247,6 +1248,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SET client_min_messages TO ERROR; \set VERBOSITY terse -- fast 
path router plannable, but errors @@ -1611,11 +1613,14 @@ BEGIN return max_id; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- we don't want too many details. though we're omitting -- "DETAIL: distribution column value:", we see it acceptable -- since the query results verifies the correctness \set VERBOSITY terse SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: query has a single distribution column value: 1 @@ -1625,30 +1630,35 @@ DEBUG: query has a single distribution column value: 1 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 (1 row) SELECT author_articles_max_id(); +DEBUG: function does not have co-located tables author_articles_max_id --------------------------------------------------------------------- 41 @@ -1665,7 +1675,9 @@ BEGIN return max_id; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for 
a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1674,6 +1686,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1682,6 +1695,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1690,6 +1704,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1698,6 +1713,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1706,6 +1722,7 @@ DEBUG: Creating router plan (1 row) SELECT author_articles_max_id(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan author_articles_max_id @@ -1724,7 +1741,9 @@ BEGIN END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan DEBUG: query has a single distribution column value: 1 @@ -1738,6 +1757,7 @@ DEBUG: query has a single distribution column value: 1 (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1748,6 +1768,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) 
SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1758,6 +1779,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1768,6 +1790,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1778,6 +1801,7 @@ SELECT * FROM author_articles_id_word_count(); (5 rows) SELECT * FROM author_articles_id_word_count(); +DEBUG: function does not have co-located tables id | word_count --------------------------------------------------------------------- 1 | 9572 @@ -1798,7 +1822,9 @@ BEGIN END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1811,6 +1837,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1823,6 +1850,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1835,6 +1863,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a 
fast-path router query DEBUG: Creating router plan id | word_count @@ -1847,6 +1876,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count @@ -1859,6 +1889,7 @@ DEBUG: Creating router plan (5 rows) SELECT * FROM author_articles_id_word_count(1); +DEBUG: function does not have co-located tables DEBUG: Deferred pruning for a fast-path router query DEBUG: Creating router plan id | word_count diff --git a/src/test/regress/expected/multi_schema_support.out b/src/test/regress/expected/multi_schema_support.out index 6e3ac0dc8..eb1c47401 100644 --- a/src/test/regress/expected/multi_schema_support.out +++ b/src/test/regress/expected/multi_schema_support.out @@ -194,31 +194,6 @@ BEGIN END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 2 -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -\c - - - :master_port -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction @@ -260,33 +235,6 @@ BEGIN END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 in schema -\c - - - :worker_1_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 2 in 
schema -\c - - - :worker_2_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; -\c - - - :master_port -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; diff --git a/src/test/regress/expected/multi_subquery_behavioral_analytics.out b/src/test/regress/expected/multi_subquery_behavioral_analytics.out index e357132b4..9284b7f92 100644 --- a/src/test/regress/expected/multi_subquery_behavioral_analytics.out +++ b/src/test/regress/expected/multi_subquery_behavioral_analytics.out @@ -1563,22 +1563,8 @@ LIMIT 10; (10 rows) -- this is one complex join query derived from a user's production query --- first declare the function on workers on master +-- declare the function on workers and master -- With array_index: -SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) - RETURNS INT AS $$ - SELECT i - FROM (SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) - WHERE $1 [i] = $2 - LIMIT 1; - $$ LANGUAGE sql') -ORDER BY 1,2; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | CREATE FUNCTION - localhost | 57638 | t | CREATE FUNCTION -(2 rows) - CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i @@ -1745,14 +1731,6 @@ LIMIT 10; (10 rows) -- drop created functions -SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') -ORDER BY 1,2; - nodename | nodeport | success | result ---------------------------------------------------------------------- - localhost | 57637 | t | DROP FUNCTION - localhost | 57638 | t | DROP 
FUNCTION -(2 rows) - DROP FUNCTION array_index(ANYARRAY, ANYELEMENT); -- a query with a constant subquery SELECT count(*) as subquery_count @@ -1883,6 +1861,8 @@ CREATE OR REPLACE FUNCTION volatile_func_test() RETURNS INT AS $$ SELECT 1; $$ LANGUAGE sql VOLATILE; +DEBUG: switching to sequential query execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands -- Citus should be able to evalute functions/row comparisons on the LIMIT/OFFSET SELECT user_id, array_length(events_table, 1) FROM ( @@ -1987,21 +1967,6 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -SELECT run_command_on_workers($f$ - -CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool - AS 'select $1 > $2;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT; - -$f$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") - (localhost,57638,t,"CREATE FUNCTION") -(2 rows) - -- we don't support joins via functions SELECT user_id, array_length(events_table, 1) FROM ( @@ -2285,16 +2250,5 @@ LIMIT 1; (1 row) DROP FUNCTION test_join_function_2(integer, integer); -SELECT run_command_on_workers($f$ - - DROP FUNCTION test_join_function_2(integer, integer); - -$f$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"DROP FUNCTION") - (localhost,57638,t,"DROP FUNCTION") -(2 rows) - SET citus.enable_router_execution TO TRUE; SET citus.subquery_pushdown to OFF; diff --git a/src/test/regress/expected/multi_test_helpers_superuser.out b/src/test/regress/expected/multi_test_helpers_superuser.out index 238ed9f2d..4bd8b941d 100644 --- a/src/test/regress/expected/multi_test_helpers_superuser.out +++ 
b/src/test/regress/expected/multi_test_helpers_superuser.out @@ -38,6 +38,7 @@ SELECT dist_node_check.matches AND dist_placement_check.matches FROM dist_node_check CROSS JOIN dist_placement_check $$; -- partition_task_list_results tests the internal PartitionTasklistResults function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text, query text, target_table regclass, @@ -63,3 +64,4 @@ BEGIN PERFORM public.wait_until_metadata_sync(30000); END; $function$; +RESET citus.enable_metadata_sync; diff --git a/src/test/regress/expected/multi_transactional_drop_shards.out b/src/test/regress/expected/multi_transactional_drop_shards.out index 5d59069a3..fd06e5d14 100644 --- a/src/test/regress/expected/multi_transactional_drop_shards.out +++ b/src/test/regress/expected/multi_transactional_drop_shards.out @@ -292,11 +292,13 @@ SET citus.override_table_visibility TO false; (1 row) -- test DROP table with failing worker +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION fail_drop_table() RETURNS event_trigger AS $fdt$ BEGIN RAISE 'illegal value'; END; $fdt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table(); \c - - - :master_port \set VERBOSITY terse diff --git a/src/test/regress/expected/multi_unsupported_worker_operations.out b/src/test/regress/expected/multi_unsupported_worker_operations.out index 2e412a4a8..d54e5e84b 100644 --- a/src/test/regress/expected/multi_unsupported_worker_operations.out +++ b/src/test/regress/expected/multi_unsupported_worker_operations.out @@ -62,6 +62,7 @@ SELECT * FROM mx_table ORDER BY col_1; -- Try commands from metadata worker \c - - - :worker_1_port -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, 
distribution_method citus.distribution_type) @@ -78,6 +79,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE mx_table_worker(col_1 text); -- master_create_distributed_table SELECT master_create_distributed_table('mx_table_worker', 'col_1', 'hash'); diff --git a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out index 1a4f20529..976f3d438 100644 --- a/src/test/regress/expected/non_colocated_leaf_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_leaf_subquery_joins.out @@ -22,6 +22,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode SHOW log_error_verbosity; log_error_verbosity --------------------------------------------------------------------- @@ -36,6 +37,7 @@ FROM (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.user_id;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 
ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) valid @@ -51,6 +53,7 @@ FROM (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.user_id;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) @@ -73,6 +76,7 @@ WHERE users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY 
(SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) valid @@ -89,6 +93,7 @@ SELECT count(*) FROM q1, (SELECT users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); +DEBUG: function does not have co-located tables DEBUG: CTE q1 is going to be inlined via distributed planning DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) @@ -101,6 +106,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c SELECT true AS valid FROM explain_json($$ (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) UNION (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, 
public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -137,6 +143,7 @@ FROM ( ) q ORDER BY 2 DESC, 1; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: push down of limit count: 5 DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 @@ -158,6 +165,7 @@ FROM (SELECT users_table.user_id, value_1 FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8)) as bar WHERE foo.user_id = bar.value_1;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id 
OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.value_1) valid diff --git a/src/test/regress/expected/non_colocated_subquery_joins.out b/src/test/regress/expected/non_colocated_subquery_joins.out index 0776f47cf..c14502569 100644 --- a/src/test/regress/expected/non_colocated_subquery_joins.out +++ b/src/test/regress/expected/non_colocated_subquery_joins.out @@ -32,6 +32,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +DEBUG: switching to sequential query execution mode -- leaf queries contain colocated joins -- but not the subquery SELECT true AS valid FROM explain_json_2($$ @@ -43,6 +44,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.value_2 = bar.value_2; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.value_2 FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar WHERE (foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) valid @@ -63,6 +65,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT event_type FROM events_table WHERE user_id < 100); $$); +DEBUG: 
function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 100) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer))) valid @@ -82,6 +85,7 @@ SELECT true AS valid FROM explain_json_2($$ NOT IN (SELECT user_id FROM events_table WHERE event_type = 2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.events_table WHERE (event_type OPERATOR(pg_catalog.=) 2) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.events_table WHERE (NOT (user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) valid @@ -103,6 +107,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.event_type IN (SELECT event_type FROM events_table WHERE user_id < 3); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 3) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar 
WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) valid @@ -123,6 +128,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.user_id IN (SELECT user_id FROM events_table WHERE user_id < 10); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT events_table.user_id FROM public.events_table WHERE (events_table.user_id OPERATOR(pg_catalog.<) 10)))) valid @@ -143,6 +149,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.user_id NOT IN (SELECT user_id FROM events_table WHERE user_id < 10); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT (users_table.user_id OPERATOR(pg_catalog./) 2) AS user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 
ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 10) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (NOT (foo.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))))) @@ -164,6 +171,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id AND foo.event_type IN (SELECT event_type FROM events_table WHERE user_id < 4); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.<) 4) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id FROM (SELECT intermediate_result.user_id, intermediate_result.event_type FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, event_type integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND 
(events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE ((foo.user_id OPERATOR(pg_catalog.=) bar.user_id) AND (foo.event_type OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.event_type FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(event_type integer)))) @@ -188,6 +196,7 @@ SELECT true AS valid FROM explain_json_2($$ ) as foo_top, events_table WHERE events_table.user_id = foo_top.user_id; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, events_table.event_type FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.event_type) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_3 for subquery SELECT event_type FROM public.events_table WHERE (user_id OPERATOR(pg_catalog.=) 5) @@ -222,6 +231,7 @@ SELECT true AS valid FROM explain_json_2($$ ) as foo_top; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) 
ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top valid @@ -253,6 +263,7 @@ SELECT true AS valid FROM explain_json_2($$ foo1.user_id = foo5.user_id ) as foo_top; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id 
OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.user_id))) foo_top valid @@ -282,6 +293,7 @@ SELECT true AS valid FROM explain_json_2($$ foo1.user_id = foo5.value_1 ) as foo_top; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE 
((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top @@ -313,6 +325,7 @@ SELECT true AS valid FROM explain_json_2($$ foo2.user_id = foo5.value_1 ) as foo_top; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, users_table.value_1 FROM 
public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[17, 18, 19, 20]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT user_id, random FROM (SELECT foo1.user_id, random() AS random FROM (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo1, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo2, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo3, (SELECT users_table.user_id, users_table.value_1 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) foo4, (SELECT intermediate_result.user_id, intermediate_result.value_1 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_1 integer)) foo5 WHERE ((foo1.user_id OPERATOR(pg_catalog.=) foo4.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo2.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) foo3.user_id) AND (foo1.user_id OPERATOR(pg_catalog.=) 
foo4.user_id) AND (foo2.user_id OPERATOR(pg_catalog.=) foo5.value_1))) foo_top @@ -346,6 +359,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id) as bar_top ON (foo_top.user_id = bar_top.user_id); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT foo.user_id FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) foo, (SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) bar_top ON ((foo_top.user_id OPERATOR(pg_catalog.=) bar_top.user_id))) @@ -380,6 +394,7 @@ SELECT true AS valid FROM explain_json_2($$ ON (foo_top.value_2 = 
bar_top.user_id); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) valid @@ -411,6 +426,7 @@ SELECT true AS valid FROM explain_json_2($$ foo.user_id = bar.user_id) as bar_top ON (foo_top.value_2 = bar_top.user_id); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND 
(events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[13, 14, 15, 16]))) DEBUG: generating subplan XXX_2 for subquery SELECT foo.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12])))) foo, (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT foo.user_id, foo.value_2 FROM (SELECT DISTINCT users_table.user_id, users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT DISTINCT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8])))) bar WHERE (foo.user_id OPERATOR(pg_catalog.=) bar.user_id)) foo_top JOIN (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) bar_top ON ((foo_top.value_2 OPERATOR(pg_catalog.=) bar_top.user_id))) @@ -432,6 +448,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT events_table.user_id AS my_users FROM public.events_table, public.users_table WHERE (events_table.event_type OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM 
(SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -451,6 +468,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT events_table.event_type AS my_users, random() AS random FROM public.events_table, public.users_table WHERE (events_table.user_id OPERATOR(pg_catalog.=) users_table.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT intermediate_result.my_users, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(my_users integer, random double precision)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -474,6 +492,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c WHERE foo.my_users = users_table.user_id) as mid_level_query ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT events_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT intermediate_result.user_id, intermediate_result.random 
FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) selected_users WHERE (events_table.event_type OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -507,6 +526,7 @@ SELECT true AS valid FROM explain_json_2($$ ) as bar; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2 FROM public.events_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT mid_level_query.user_id FROM (SELECT DISTINCT users_table.user_id FROM public.users_table, (SELECT events_table.user_id AS my_users FROM public.events_table, (SELECT events_table_1.user_id FROM public.users_table users_table_1, public.events_table events_table_1 WHERE ((users_table_1.user_id OPERATOR(pg_catalog.=) events_table_1.user_id) AND (users_table_1.user_id OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer))))) selected_users WHERE (events_table.user_id OPERATOR(pg_catalog.=) selected_users.user_id)) foo WHERE (foo.my_users OPERATOR(pg_catalog.=) users_table.user_id)) mid_level_query) bar valid @@ -528,6 +548,7 @@ WHERE users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (5,6));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.=) ANY (SELECT intermediate_result.user_id 
FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer))) valid @@ -544,6 +565,7 @@ SELECT count(*) FROM q1, (SELECT users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); +DEBUG: function does not have co-located tables DEBUG: CTE q1 is going to be inlined via distributed planning DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.user_id FROM public.users_table) q1, (SELECT intermediate_result.user_id, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, random double precision)) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) q1.user_id) @@ -561,6 +583,7 @@ SELECT true AS valid FROM explain_json_2($$ users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2,3,4)) as bar WHERE bar.user_id = q1.user_id ;$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for CTE q1: SELECT user_id FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)) q1, (SELECT users_table.user_id, random() AS random FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) bar WHERE (bar.user_id OPERATOR(pg_catalog.=) 
q1.user_id) valid @@ -572,6 +595,7 @@ DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS c SELECT true AS valid FROM explain_json_2($$ (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.value_2 AND event_type IN (1,2,3,4)) UNION (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (5,6,7,8));$$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -608,6 +632,7 @@ FROM ( ) q ORDER BY 2 DESC, 1; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table.user_id FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.value_2) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4]))) DEBUG: push down of limit count: 5 DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table WHERE ((value_2 OPERATOR(pg_catalog.>=) 5) AND (EXISTS (SELECT intermediate_result.user_id FROM 
read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer)))) LIMIT 5 @@ -629,6 +654,7 @@ SELECT true AS valid FROM explain_json_2($$ FROM (users_table u1 JOIN users_table u2 using(value_1)) a JOIN (SELECT value_1, random() FROM users_table) as u3 USING (value_1); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_1, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((public.users_table u1 JOIN public.users_table u2 USING (value_1)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT intermediate_result.value_1, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, random double precision)) u3 USING (value_1)) ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns @@ -641,6 +667,7 @@ SELECT true AS valid FROM explain_json_2($$ FROM (SELECT * FROM users_table u1 JOIN users_table u2 using(value_1)) a JOIN (SELECT value_1, random() FROM users_table) as u3 USING (value_1); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT u1.value_1, u1.user_id, u1."time", u1.value_2, u1.value_3, u1.value_4, u2.user_id, u2."time", u2.value_2, u2.value_3, u2.value_4 FROM (public.users_table u1 JOIN public.users_table u2 USING (value_1)) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_1, intermediate_result.user_id, intermediate_result."time", intermediate_result.value_2, intermediate_result.value_3, intermediate_result.value_4, intermediate_result.user_id_1 AS user_id, intermediate_result.time_1 AS "time", intermediate_result.value_2_1 AS value_2, 
intermediate_result.value_3_1 AS value_3, intermediate_result.value_4_1 AS value_4 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer, user_id integer, "time" timestamp without time zone, value_2 integer, value_3 double precision, value_4 bigint, user_id_1 integer, time_1 timestamp without time zone, value_2_1 integer, value_3_1 double precision, value_4_1 bigint)) a(value_1, user_id, "time", value_2, value_3, value_4, user_id_1, time_1, value_2_1, value_3_1, value_4_1) JOIN (SELECT users_table.value_1, random() AS random FROM public.users_table) u3 USING (value_1)) valid @@ -660,6 +687,7 @@ SELECT true AS valid FROM explain_json_2($$ events_table using (value_2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u1 JOIN public.events_table USING (value_2)) valid @@ -678,6 +706,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_2, random() FROM users_table) as u2 USING(value_2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 LEFT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) valid @@ -698,6 +727,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT 
value_2, random() FROM users_table) as u2 USING(value_2); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_2, random() AS random FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM ((SELECT users_table.value_2, random() AS random FROM public.users_table) u1 RIGHT JOIN (SELECT intermediate_result.value_2, intermediate_result.random FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer, random double precision)) u2 USING (value_2)) ERROR: cannot pushdown the subquery @@ -717,6 +747,7 @@ SELECT true AS valid FROM explain_json_2($$ (SELECT value_1 FROM users_table) as foo ON (a.user_id = foo.value_1) ); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -741,6 +772,7 @@ SELECT true AS valid FROM explain_json_2($$ users_table as foo ON (a.user_id = foo.value_1) ); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id FROM public.users_table DEBUG: generating subplan XXX_2 for subquery SELECT user_id FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer) UNION SELECT intermediate_result.user_id FROM read_intermediate_result('XXX_2'::text, 
'binary'::citus_copy_format) intermediate_result(user_id integer) @@ -774,6 +806,7 @@ SELECT true AS valid FROM explain_json_2($$ ON(foo.user_id = bar.value_1) ); $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_1 FROM public.users_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT foo.user_id, a.user_id, bar.value_1 FROM (((SELECT users_table.user_id FROM public.users_table) foo JOIN (SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])) UNION SELECT users_table.user_id FROM public.users_table WHERE (users_table.user_id OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) a ON ((a.user_id OPERATOR(pg_catalog.=) foo.user_id))) JOIN (SELECT intermediate_result.value_1 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_1 integer)) bar ON ((foo.user_id OPERATOR(pg_catalog.=) bar.value_1))) valid @@ -813,6 +846,7 @@ SELECT true AS valid FROM explain_json_2($$ WHERE non_colocated_subquery.value_2 != non_colocated_subquery_2.cnt $$); +DEBUG: function does not have co-located tables DEBUG: CTE non_colocated_subquery is going to be inlined via distributed planning DEBUG: CTE non_colocated_subquery_2 is going to be inlined via distributed planning DEBUG: generating subplan XXX_1 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) @@ -837,6 +871,7 @@ SELECT true AS valid FROM explain_json_2($$ AND foo.value_2 = baz.value_2 $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT users_table_local.value_2 FROM non_colocated_subquery.users_table_local, non_colocated_subquery.events_table_local WHERE ((users_table_local.user_id OPERATOR(pg_catalog.=) 
events_table_local.user_id) AND (events_table_local.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[5, 6, 7, 8]))) DEBUG: generating subplan XXX_2 for subquery SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[9, 10, 11, 12]))) DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT users_table.value_2 FROM public.users_table, public.events_table WHERE ((users_table.user_id OPERATOR(pg_catalog.=) events_table.user_id) AND (events_table.event_type OPERATOR(pg_catalog.=) ANY (ARRAY[1, 2, 3, 4])))) foo, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) bar, (SELECT intermediate_result.value_2 FROM read_intermediate_result('XXX_2'::text, 'binary'::citus_copy_format) intermediate_result(value_2 integer)) baz WHERE ((foo.value_2 OPERATOR(pg_catalog.=) bar.value_2) AND (foo.value_2 OPERATOR(pg_catalog.=) baz.value_2)) @@ -872,6 +907,7 @@ SELECT true AS valid FROM explain_json_2($$ AND foo.user_id IN (SELECT users_table.user_id FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND event_type IN (1,2)) $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT value_1, value_2 FROM public.users_table DEBUG: generating subplan XXX_2 for subquery SELECT value_1 FROM public.users_table WHERE (value_2 OPERATOR(pg_catalog.<) 1) DEBUG: generating subplan XXX_3 for subquery SELECT value_2 FROM public.users_table WHERE (value_1 OPERATOR(pg_catalog.<) 2) @@ -894,6 +930,7 @@ SELECT true AS valid FROM explain_json_2($$ users_table_ref.user_id = foo.user_id AND foo.user_id = bar.value_2; $$); +DEBUG: function does not have co-located tables DEBUG: generating subplan XXX_1 for subquery SELECT user_id, value_2 FROM 
public.events_table DEBUG: Plan XXX query after replacing subqueries and CTEs: SELECT count(*) AS count FROM public.users_reference_table users_table_ref, (SELECT users_table.user_id FROM public.users_table) foo, (SELECT intermediate_result.user_id, intermediate_result.value_2 FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id integer, value_2 integer)) bar WHERE ((users_table_ref.user_id OPERATOR(pg_catalog.=) foo.user_id) AND (foo.user_id OPERATOR(pg_catalog.=) bar.value_2)) valid @@ -942,6 +979,7 @@ JOIN LATERAL WHERE user_id = users_table.user_id) AS bar LEFT JOIN users_table u2 ON u2.user_id = bar.value_2) AS foo ON TRUE $$); +DEBUG: function does not have co-located tables DEBUG: Router planner cannot handle multi-shard select queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries DEBUG: skipping recursive planning for the subquery since it contains references to outer queries diff --git a/src/test/regress/expected/pg13.out b/src/test/regress/expected/pg13.out index 7e91e3d2a..2ba782aaf 100644 --- a/src/test/regress/expected/pg13.out +++ b/src/test/regress/expected/pg13.out @@ -145,9 +145,13 @@ CREATE TYPE myvarchar; CREATE FUNCTION myvarcharin(cstring, oid, integer) RETURNS myvarchar LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharin'; NOTICE: return type myvarchar is only a shell +WARNING: Citus can't distribute functions having dependency on unsupported object of type "type" +DETAIL: Function will be created only locally CREATE FUNCTION myvarcharout(myvarchar) RETURNS cstring LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharout'; NOTICE: argument type myvarchar is only a shell +WARNING: Citus can't distribute functions having dependency on unsupported object of type "type" +DETAIL: Function will be created only locally CREATE TYPE myvarchar ( input = myvarcharin, output = myvarcharout, diff --git a/src/test/regress/expected/pg14.out 
b/src/test/regress/expected/pg14.out index f315fccdc..c49880109 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -1349,3 +1349,72 @@ SELECT * FROM nested_obj_update ORDER BY 1,2,3; set client_min_messages to error; drop schema pg14 cascade; +create schema pg14; +set search_path to pg14; +select 1 from citus_add_node('localhost',:master_port,groupid=>0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- test adding foreign table to metadata with the guc +-- will test truncating foreign tables later +CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial); +INSERT INTO foreign_table_test VALUES (1, 'text_test'); +SELECT citus_add_local_table_to_metadata('foreign_table_test'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +CREATE EXTENSION postgres_fdw; +CREATE SERVER foreign_server + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'localhost', port :'master_port', dbname 'regression'); +CREATE USER MAPPING FOR CURRENT_USER + SERVER foreign_server + OPTIONS (user 'postgres'); +CREATE FOREIGN TABLE foreign_table ( + id integer NOT NULL, + data text, + a bigserial +) + SERVER foreign_server + OPTIONS (schema_name 'pg14', table_name 'foreign_table_test'); +SELECT citus_add_local_table_to_metadata('foreign_table'); + citus_add_local_table_to_metadata +--------------------------------------------------------------------- + +(1 row) + +SELECT count(*) FROM foreign_table; + count +--------------------------------------------------------------------- + 1 +(1 row) + +TRUNCATE foreign_table; +\c - - - :worker_1_port +set search_path to pg14; +-- verify the foreign table is truncated +SELECT count(*) FROM pg14.foreign_table; + count +--------------------------------------------------------------------- + 0 +(1 row) + +-- should error out +TRUNCATE foreign_table; +ERROR: truncating foreign 
tables that are added to metadata can only be excuted on the coordinator +\c - - - :master_port +-- cleanup +set client_min_messages to error; +drop extension postgres_fdw cascade; +drop schema pg14 cascade; +reset client_min_messages; +select 1 from citus_remove_node('localhost',:master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/propagate_extension_commands.out b/src/test/regress/expected/propagate_extension_commands.out index 71966ed39..1c2fd19b4 100644 --- a/src/test/regress/expected/propagate_extension_commands.out +++ b/src/test/regress/expected/propagate_extension_commands.out @@ -186,22 +186,6 @@ CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); $$); diff --git a/src/test/regress/expected/propagate_extension_commands_1.out b/src/test/regress/expected/propagate_extension_commands_1.out index 482ee5580..e6c85bee4 100644 --- a/src/test/regress/expected/propagate_extension_commands_1.out +++ 
b/src/test/regress/expected/propagate_extension_commands_1.out @@ -186,22 +186,6 @@ CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") -(1 row) - SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); $$); diff --git a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out index 987c52bdc..948adb050 100644 --- a/src/test/regress/expected/replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/replicate_reference_tables_to_coordinator.out @@ -43,11 +43,13 @@ SELECT create_reference_table('numbers'); INSERT INTO numbers VALUES (20), (21); NOTICE: executing the command locally: INSERT INTO replicate_ref_to_coordinator.numbers_8000001 AS citus_table_alias (a) VALUES (20), (21) +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION my_volatile_fn() RETURNS INT AS $$ BEGIN RETURN 1; END; $$ language plpgsql VOLATILE; +RESET citus.enable_metadata_sync; -- INSERT ... 
SELECT between reference tables BEGIN; EXPLAIN (COSTS OFF) INSERT INTO squares SELECT a, a*a FROM numbers; diff --git a/src/test/regress/expected/shard_move_deferred_delete.out b/src/test/regress/expected/shard_move_deferred_delete.out index ed0d46500..d878c18a4 100644 --- a/src/test/regress/expected/shard_move_deferred_delete.out +++ b/src/test/regress/expected/shard_move_deferred_delete.out @@ -142,22 +142,35 @@ SELECT master_move_shard_placement(20000000, 'localhost', :worker_2_port, 'local (1 row) -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 20 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") - (localhost,57638,t,"CREATE FUNCTION") -(2 rows) - +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into 
total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :master_port +SET search_path TO shard_move_deferred_delete; SELECT citus_shard_cost_by_disk_size(20000001); citus_shard_cost_by_disk_size --------------------------------------------------------------------- @@ -187,22 +200,35 @@ $cmd$); (localhost,57638,t,1) (2 rows) -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 8300 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); - run_command_on_workers ---------------------------------------------------------------------- - (localhost,57637,t,"CREATE FUNCTION") - (localhost,57638,t,"CREATE FUNCTION") -(2 rows) - +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :master_port +SET search_path TO shard_move_deferred_delete; -- When there would not be enough free space left 
after the move, the move should fail SELECT master_move_shard_placement(20000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port); ERROR: not enough empty space on node if the shard is moved, actual available space after move will be 108 bytes, desired available space after move is 850 bytes,estimated size increase on node after move is 8192 bytes. diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index bbf537000..c63b50842 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -241,6 +241,7 @@ RETURNS boolean AS 'citus' LANGUAGE C STRICT VOLATILE; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -257,6 +258,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET citus.next_shard_id TO 123000; SELECT worker_node_responsive(node_name, node_port::int) FROM master_get_active_worker_nodes() diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out index 4360bb69e..f3f12b975 100644 --- a/src/test/regress/expected/subquery_and_cte.out +++ b/src/test/regress/expected/subquery_and_cte.out @@ -126,6 +126,7 @@ WITH cte1 AS MATERIALIZED (SELECT id, value FROM func()) UPDATE dist_table dt SET value = cte1.value FROM cte1 WHERE dt.id = 1; DEBUG: generating subplan XXX_1 for CTE cte1: SELECT id, value FROM subquery_and_ctes.func() func(id, value) +DEBUG: function does not have co-located tables DEBUG: Plan XXX query after replacing subqueries and CTEs: UPDATE subquery_and_ctes.dist_table dt SET value = cte1.value FROM (SELECT 
intermediate_result.id, intermediate_result.value FROM read_intermediate_result('XXX_1'::text, 'binary'::citus_copy_format) intermediate_result(id integer, value integer)) cte1 WHERE (dt.id OPERATOR(pg_catalog.=) 1) -- CTEs are recursively planned, and subquery foo is also recursively planned -- final plan becomes a real-time plan since we also have events_table in the diff --git a/src/test/regress/expected/text_search.out b/src/test/regress/expected/text_search.out new file mode 100644 index 000000000..1b4f652c1 --- /dev/null +++ b/src/test/regress/expected/text_search.out @@ -0,0 +1,489 @@ +CREATE SCHEMA text_search; +CREATE SCHEMA text_search2; +SET search_path TO text_search; +-- create a new configruation from scratch +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE t1; +DROP TEXT SEARCH CONFIGURATION my_text_search_config; +-- try to create table and index in 1 transaction +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ABORT; +-- try again, should not fail with my_text_search_config being retained on the worker +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'on demand propagation of text 
search object with a comment'; +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | on demand propagation of text search object with a comment + localhost | 57638 | t | on demand propagation of text search object with a comment +(2 rows) + +-- verify that changing anything on a managed TEXT SEARCH CONFIGURATION fails after parallel execution +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can''t be set right now'; +ERROR: cannot run text search configuration command because there was a parallel operation on a distributed table in the transaction +DETAIL: When running command on/for a distributed text search configuration, Citus needs to perform all operations over a single connection per node to ensure consistency. 
+HINT: Try re-running the transaction with "SET LOCAL citus.multi_shard_modify_mode TO 'sequential';" +ABORT; +-- create an index on an already distributed table +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config2 ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config2 IS 'on demand propagation of text search object with a comment 2'; +CREATE TABLE t1(id int, name text); +SELECT create_distributed_table('t1', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config2'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | on demand propagation of text search object with a comment 2 + localhost | 57638 | t | on demand propagation of text search object with a comment 2 +(2 rows) + +ABORT; +-- should be able to create a configuration based on a copy of an existing configuration +CREATE TEXT SEARCH CONFIGURATION french_noaccent ( COPY = french ); +CREATE TABLE t2(id int, name text); +CREATE INDEX t2_search_name ON t2 USING gin (to_tsvector('text_search.french_noaccent'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t2', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- spot check that french_noaccent copied settings from french +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result 
+--------------------------------------------------------------------- + localhost | 57637 | t | (asciiword,french_stem) + localhost | 57638 | t | (asciiword,french_stem) +(2 rows) + +-- makes no sense, however we expect that the dictionary for the first token changes accordingly +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciiword WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciiword,dutch_stem) + localhost | 57638 | t | (asciiword,dutch_stem) +(2 rows) + +-- do the same but we will replace all french dictionaries +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,french_stem) + localhost | 57638 | t | (asciihword,french_stem) +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING REPLACE french_stem WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,dutch_stem) + localhost | 57638 | t | (asciihword,dutch_stem) +(2 rows) + +-- once more but now back via yet a different DDL command +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciihword REPLACE dutch_stem WITH french_stem; +SELECT * FROM run_command_on_workers($$ + 
SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,french_stem) + localhost | 57638 | t | (asciihword,french_stem) +(2 rows) + +-- drop a mapping +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING FOR asciihword; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,) + localhost | 57638 | t | (asciihword,) +(2 rows) + +-- also with exists, doesn't change anything, but should not error +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciihword; +NOTICE: mapping for token type "asciihword" does not exist, skipping +-- Comment on a text search configuration +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butcherd to test all edge cases'; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | a text configuration that is butcherd to test all edge cases + localhost | 57638 | t | a text configuration that is butcherd to test all edge cases +(2 rows) + +-- Remove a comment +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS NULL; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) ORDER BY 1,2; + nodename | nodeport | success | result 
+--------------------------------------------------------------------- + localhost | 57637 | t | + localhost | 57638 | t | +(2 rows) + +-- verify adding 2 dictionaries for two tokes at once +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciiword, asciihword; +NOTICE: mapping for token type "asciihword" does not exist, skipping +ALTER TEXT SEARCH CONFIGURATION french_noaccent ADD MAPPING FOR asciiword, asciihword WITH french_stem, dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciiword,"{french_stem,dutch_stem}") + localhost | 57638 | t | (asciiword,"{french_stem,dutch_stem}") +(2 rows) + +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | (asciihword,"{french_stem,dutch_stem}") + localhost | 57638 | t | (asciihword,"{french_stem,dutch_stem}") +(2 rows) + +--verify we can drop cascade a configuration that is in use +-- verify it is in use +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent; +ERROR: cannot drop text search configuration french_noaccent because other objects depend on it +DETAIL: index t2_search_name depends on text search configuration french_noaccent +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+-- drop cascade +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent CASCADE; +NOTICE: drop cascades to index t2_search_name +-- verify the configuration is dropped from the workers +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.french_noaccent'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.french_noaccent" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.french_noaccent" does not exist +(2 rows) + +SET client_min_messages TO 'warning'; +SELECT * FROM run_command_on_workers($$CREATE ROLE text_search_owner;$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | CREATE ROLE + localhost | 57638 | t | CREATE ROLE +(2 rows) + +CREATE ROLE text_search_owner; +RESET client_min_messages; +CREATE TEXT SEARCH CONFIGURATION changed_owner ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | postgres + localhost | 57638 | t | postgres +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION changed_owner OWNER TO text_search_owner; +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search_owner + localhost | 57638 | t | text_search_owner +(2 rows) + +-- redo test with propagating object after it was created and changed of owner +SET 
citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION changed_owner2 ( PARSER = default ); +ALTER TEXT SEARCH CONFIGURATION changed_owner2 OWNER TO text_search_owner; +RESET citus.enable_ddl_propagation; +-- verify object doesn't exist before propagating +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.changed_owner2'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.changed_owner2" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.changed_owner2" does not exist +(2 rows) + +-- distribute configuration +CREATE TABLE t3(id int, name text); +CREATE INDEX t3_search_name ON t3 USING gin (to_tsvector('text_search.changed_owner2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t3', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- verify config owner +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner2'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search_owner + localhost | 57638 | t | text_search_owner +(2 rows) + +-- rename tests +CREATE TEXT SEARCH CONFIGURATION change_name ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_name'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.change_name + localhost | 57638 | t | text_search.change_name +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION change_name RENAME TO 
changed_name; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.changed_name'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.changed_name + localhost | 57638 | t | text_search.changed_name +(2 rows) + +-- test move of schema +CREATE TEXT SEARCH CONFIGURATION change_schema ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_schema'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.change_schema + localhost | 57638 | t | text_search.change_schema +(2 rows) + +ALTER TEXT SEARCH CONFIGURATION change_schema SET SCHEMA text_search2; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search2.change_schema'::regconfig; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search2.change_schema + localhost | 57638 | t | text_search2.change_schema +(2 rows) + +-- verify we get an error that the configuration change_schema is not found, even though the object address will be +-- found in its new schema, and is distributed +ALTER TEXT SEARCH CONFIGURATION change_schema SET SCHEMA text_search2; +ERROR: text search configuration "change_schema" does not exist +-- should tell us that text_search.does_not_exist does not exist, covers a complex edgecase +-- in resolving the object address +ALTER TEXT SEARCH CONFIGURATION text_search.does_not_exist SET SCHEMA text_search2; +ERROR: text search configuration "text_search.does_not_exist" does not exist +-- verify edgecases in deparsers +CREATE TEXT SEARCH CONFIGURATION config1 ( PARSER = default 
); +CREATE TEXT SEARCH CONFIGURATION config2 ( PARSER = default ); +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION config3 ( PARSER = default ); +RESET citus.enable_ddl_propagation; +-- verify config1, config2 exist on workers, config3 not +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.config1 + localhost | 57638 | t | text_search.config1 +(2 rows) + +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.config2 + localhost | 57638 | t | text_search.config2 +(2 rows) + +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config3" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config3" does not exist +(2 rows) + +-- DROP all config's, only 1&2 are distributed, they should propagate well to remotes +DROP TEXT SEARCH CONFIGURATION config1, config2, config3; +-- verify all existing ones have been removed (checking config3 for consistency) +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config1" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config1" does not exist +(2 rows) + +SELECT * FROM 
run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config2" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config2" does not exist +(2 rows) + +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.config3" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.config3" does not exist +(2 rows) + +-- verify they are all removed locally +SELECT 'text_search.config1'::regconfig; +ERROR: text search configuration "text_search.config1" does not exist +SELECT 'text_search.config2'::regconfig; +ERROR: text search configuration "text_search.config2" does not exist +SELECT 'text_search.config3'::regconfig; +ERROR: text search configuration "text_search.config3" does not exist +-- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION concurrent_index_config ( PARSER = default ); +RESET citus.enable_ddl_propagation; +-- verify it doesn't exist on the workers +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | f | ERROR: text search configuration "text_search.concurrent_index_config" does not exist + localhost | 57638 | f | ERROR: text search configuration "text_search.concurrent_index_config" does not exist +(2 rows) + +-- create distributed table that 
then concurrently would have an index created. +CREATE TABLE t4(id int, name text); +SELECT create_distributed_table('t4', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE INDEX CONCURRENTLY t4_search_name ON t4 USING gin (to_tsvector('text_search.concurrent_index_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +-- now the configuration should be on the worker, and the above index creation shouldn't have failed. +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | text_search.concurrent_index_config + localhost | 57638 | t | text_search.concurrent_index_config +(2 rows) + +-- verify the objid is correctly committed locally due to the somewhat convoluted commit and new transaction starting when creating an index concurrently +SELECT pg_catalog.pg_identify_object_as_address(classid, objid, objsubid) + FROM citus.pg_dist_object + WHERE classid = 3602 AND objid = 'text_search.concurrent_index_config'::regconfig::oid; + pg_identify_object_as_address +--------------------------------------------------------------------- + ("text search configuration","{text_search,concurrent_index_config}",{}) +(1 row) + +-- verify old text search configurations get renamed if they are not the same as the newly propagated configuration. +-- We do this by creating configurations on the workers as a copy from a different existing catalog. 
+SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = dutch ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | SET + localhost | 57638 | t | SET +(2 rows) + +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = french ); +-- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} + localhost | 57638 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} +(2 rows) + +-- verify the objects get reused appropriately when the specification is the same +SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; + nodename | nodeport | success | result +--------------------------------------------------------------------- + localhost | 57637 | t | SET + localhost | 57638 | t | SET +(2 rows) + +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); +-- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the +-- original one is reused +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; +$$) ORDER BY 1,2; + nodename | nodeport | success | result 
+--------------------------------------------------------------------- + localhost | 57637 | t | {manually_created_correct} + localhost | 57638 | t | {manually_created_correct} +(2 rows) + +CREATE SCHEMA "Text Search Requiring Quote's"; +CREATE TEXT SEARCH CONFIGURATION "Text Search Requiring Quote's"."Quoted Config Name" ( parser = default ); +CREATE TABLE t5(id int, name text); +CREATE INDEX t5_search_name ON t5 USING gin (to_tsvector('"Text Search Requiring Quote''s"."Quoted Config Name"'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t5', 'name'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SET client_min_messages TO 'warning'; +DROP SCHEMA text_search, text_search2, "Text Search Requiring Quote's" CASCADE; +DROP ROLE text_search_owner; diff --git a/src/test/regress/expected/upgrade_basic_before.out b/src/test/regress/expected/upgrade_basic_before.out index 28a358428..2e63447df 100644 --- a/src/test/regress/expected/upgrade_basic_before.out +++ b/src/test/regress/expected/upgrade_basic_before.out @@ -53,6 +53,7 @@ SELECT create_distributed_table('tr', 'pk'); INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -69,6 +70,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE t_range(id int, value_1 int); SELECT create_distributed_table('t_range', 'id', 'range'); create_distributed_table diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 
86c121568..185bf19c5 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -74,6 +74,8 @@ ORDER BY 1; function citus_internal_add_placement_metadata(bigint,integer,bigint,integer,bigint) function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text) function citus_internal_delete_shard_metadata(bigint) + function citus_internal_global_blocked_processes() + function citus_internal_local_blocked_processes() function citus_internal_update_placement_metadata(bigint,integer,integer) function citus_internal_update_relation_colocation(oid,integer) function citus_isolation_test_session_is_blocked(integer,integer[]) @@ -172,6 +174,8 @@ ORDER BY 1; function master_update_shard_statistics(bigint) function master_update_table_statistics(regclass) function notify_constraint_dropped() + function pg_cancel_backend(bigint) + function pg_terminate_backend(bigint, bigint) function poolinfo_valid(text) function read_intermediate_result(text,citus_copy_format) function read_intermediate_results(text[],citus_copy_format) @@ -203,6 +207,7 @@ ORDER BY 1; function worker_cleanup_job_schema_cache() function worker_create_or_alter_role(text,text,text) function worker_create_or_replace_object(text) + function worker_create_or_replace_object(text[]) function worker_create_schema(bigint,text) function worker_create_truncate_trigger(regclass) function worker_drop_distributed_table(text) @@ -267,5 +272,5 @@ ORDER BY 1; view citus_worker_stat_activity view pg_dist_shard_placement view time_partitions -(251 rows) +(256 rows) diff --git a/src/test/regress/expected/values.out b/src/test/regress/expected/values.out index 8509cd971..cc26f7432 100644 --- a/src/test/regress/expected/values.out +++ b/src/test/regress/expected/values.out @@ -587,6 +587,8 @@ CREATE OR REPLACE FUNCTION fixed_volatile_value() RETURNS integer VOLATILE AS $$ RETURN 1; END; $$ LANGUAGE plpgsql; +DEBUG: switching to sequential query 
execution mode +DETAIL: A command for a distributed function is run. To make sure subsequent commands see the function correctly we need to make sure to use only one connection for all future commands EXECUTE test_values_pushdown(fixed_volatile_value(),2,3); NOTICE: evaluated on the coordinator CONTEXT: PL/pgSQL function fixed_volatile_value() line XX at RAISE diff --git a/src/test/regress/input/multi_alter_table_statements.source b/src/test/regress/input/multi_alter_table_statements.source index fb719a243..fa828bd7b 100644 --- a/src/test/regress/input/multi_alter_table_statements.source +++ b/src/test/regress/input/multi_alter_table_statements.source @@ -294,11 +294,13 @@ ALTER TABLE lineitem_alter_220000 DROP COLUMN first; CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); -- Use an event trigger to log all DDL event tags in it +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ BEGIN INSERT INTO ddl_commands VALUES (tg_tag); END; $ldt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); @@ -499,13 +501,14 @@ CREATE TABLE trigger_table ( SELECT create_distributed_table('trigger_table', 'id'); -- first set a trigger on a shard \c - - - :worker_1_port - +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION update_value() RETURNS trigger AS $up$ BEGIN NEW.value := 'trigger enabled'; RETURN NEW; END; $up$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE TRIGGER update_value BEFORE INSERT ON trigger_table_220017 diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index 280f5df28..e5602618f 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -19,6 +19,7 @@ test: isolation_cluster_management # the following tests depend on the distributed # transactionId, so should not be parallelized # and no tests should be added before 
these +test: isolation_metadata_sync_vs_all test: isolation_distributed_transaction_id test: isolation_dump_global_wait_edges test: isolation_citus_dist_activity @@ -92,7 +93,6 @@ test: isolation_metadata_sync_deadlock test: isolation_replicated_dist_on_mx test: isolation_replicate_reference_tables_to_coordinator test: isolation_multiuser_locking -test: isolation_metadata_sync_vs_all # MXless tests test: isolation_check_mx diff --git a/src/test/regress/mitmscripts/fluent.py b/src/test/regress/mitmscripts/fluent.py index eb392ab7c..2fc408e03 100644 --- a/src/test/regress/mitmscripts/fluent.py +++ b/src/test/regress/mitmscripts/fluent.py @@ -20,9 +20,6 @@ logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level=loggin # I. Command Strings -class Stop(Exception): - pass - class Handler: ''' This class hierarchy serves two purposes: @@ -46,19 +43,13 @@ class Handler: if not self.next: raise Exception("we don't know what to do!") - try: - self.next._accept(flow, message) - except Stop: + if self.next._accept(flow, message) == 'stop': if self.root is not self: - raise + return 'stop' self.next = KillHandler(self) flow.kill() - elif result == 'done': - # stop processing this packet, move on to the next one - return - elif result == 'stop': - # from now on kill all connections - raise Stop() + else: + return result def _handle(self, flow, message): ''' diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 1a071da8a..a95efbac9 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -282,6 +282,7 @@ test: create_citus_local_table_cascade test: fkeys_between_local_ref test: auto_undist_citus_local test: mx_regular_user +test: global_cancel test: remove_coordinator # ---------- @@ -312,13 +313,16 @@ test: ssl_by_default # --------- # object distribution tests +# TODO: After deprecating parameterless create_distributed_function combine +# distributed_functions and function_propagation tests # 
--------- -test: distributed_types distributed_types_conflict disable_object_propagation distributed_types_xact_add_enum_value +test: distributed_types distributed_types_conflict disable_object_propagation distributed_types_xact_add_enum_value text_search test: check_mx test: distributed_functions distributed_functions_conflict test: distributed_collations test: distributed_procedure test: distributed_collations_conflict +test: function_propagation test: check_mx # --------- diff --git a/src/test/regress/output/multi_alter_table_statements.source b/src/test/regress/output/multi_alter_table_statements.source index f1132e9d2..2e26ea425 100644 --- a/src/test/regress/output/multi_alter_table_statements.source +++ b/src/test/regress/output/multi_alter_table_statements.source @@ -642,11 +642,13 @@ ALTER TABLE lineitem_alter_220000 DROP COLUMN first; -- Create table to trigger at-xact-end (deferred) failure CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); -- Use an event trigger to log all DDL event tags in it +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ BEGIN INSERT INTO ddl_commands VALUES (tg_tag); END; $ldt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); \c - - - :master_port -- The above trigger will cause failure at transaction end on one placement. 
@@ -985,12 +987,14 @@ SELECT create_distributed_table('trigger_table', 'id'); -- first set a trigger on a shard \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION update_value() RETURNS trigger AS $up$ BEGIN NEW.value := 'trigger enabled'; RETURN NEW; END; $up$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE TRIGGER update_value BEFORE INSERT ON trigger_table_220017 FOR EACH ROW EXECUTE PROCEDURE update_value(); diff --git a/src/test/regress/output/multi_copy.source b/src/test/regress/output/multi_copy.source index 2bd0c7b77..d6d72d963 100644 --- a/src/test/regress/output/multi_copy.source +++ b/src/test/regress/output/multi_copy.source @@ -635,6 +635,8 @@ INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::re INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'table_to_distribute'::regclass::oid, 0); INSERT INTO citus.pg_dist_object(classid, objid, objsubid) values('pg_class'::regclass::oid, 'second_dustbunnies'::regclass::oid, 0); SELECT 1 FROM master_activate_node('localhost', :worker_1_port); +NOTICE: Replicating postgres objects to node localhost:57637 +DETAIL: There are 114 objects to replicate, depending on your environment this might take a while ?column? 
--------------------------------------------------------------------- 1 diff --git a/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec b/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec index b43708ab1..84daaf792 100644 --- a/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec +++ b/src/test/regress/spec/isolation_get_distributed_wait_queries_mx.spec @@ -73,6 +73,11 @@ step "s1-select-for-update" SELECT run_commands_on_session_level_connection_to_node('SELECT * FROM ref_table FOR UPDATE'); } +step "s1-update-dist-table-id-1" +{ + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1'); +} + step "s1-commit-worker" { SELECT run_commands_on_session_level_connection_to_node('COMMIT'); @@ -115,6 +120,11 @@ step "s2-update-dist-table" SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 5'); } +step "s2-update-dist-table-id-1" +{ + SELECT run_commands_on_session_level_connection_to_node('UPDATE tt1 SET value_1 = 4 WHERE user_id = 1'); +} + step "s2-update-ref-table" { SELECT run_commands_on_session_level_connection_to_node('UPDATE ref_table SET value_1 = 12 WHERE user_id = 1'); @@ -149,7 +159,7 @@ session "s3" step "s3-select-distributed-waiting-queries" { - SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits; + SELECT blocked_statement, current_statement_in_blocking_process, waiting_node_name, blocking_node_name, waiting_node_port, blocking_node_port FROM citus_lock_waits WHERE blocked_statement NOT ILIKE '%run_commands_on_session_level_connection_to_node%' AND current_statement_in_blocking_process NOT ILIKE '%run_commands_on_session_level_connection_to_node%'; } // session s1 and s4 executes the commands on the same worker node @@ -196,3 +206,9 @@ permutation "s2-start-session-level-connection" "s2-begin-on-worker" "s2-insert- 
// blocked on the same node permutation "s1-begin" "s1-update-on-the-coordinator" "s2-update-on-the-coordinator" "s3-select-distributed-waiting-queries" "s1-commit" permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update-dist-table" "s4-start-session-level-connection" "s4-begin-on-worker" "s4-update-dist-table" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s4-commit-worker" "s1-stop-connection" "s4-stop-connection" + + +// show that even if the commands are not in a transaction block +// we can find the blocking relationship +permutation "s1-start-session-level-connection" "s1-begin-on-worker" "s1-update-dist-table-id-1" "s2-start-session-level-connection" "s2-update-dist-table-id-1" "s3-select-distributed-waiting-queries" "s1-commit-worker" "s1-stop-connection" "s2-stop-connection" +permutation "s1-begin" "s1-update-ref-table-from-coordinator" "s2-start-session-level-connection" "s2-update-ref-table" "s3-select-distributed-waiting-queries" "s1-commit" "s2-stop-connection" diff --git a/src/test/regress/spec/isolation_mx_common.include.spec b/src/test/regress/spec/isolation_mx_common.include.spec index 497d57fc4..4749e334c 100644 --- a/src/test/regress/spec/isolation_mx_common.include.spec +++ b/src/test/regress/spec/isolation_mx_common.include.spec @@ -7,6 +7,16 @@ setup LANGUAGE C STRICT VOLATILE AS 'citus', $$start_session_level_connection_to_node$$; + CREATE OR REPLACE FUNCTION override_backend_data_command_originator(bool) + RETURNS void + LANGUAGE C STRICT IMMUTABLE + AS 'citus', $$override_backend_data_command_originator$$; + + SELECT run_command_on_workers($$SET citus.enable_metadata_sync TO off;CREATE OR REPLACE FUNCTION override_backend_data_command_originator(bool) + RETURNS void + LANGUAGE C STRICT IMMUTABLE + AS 'citus'$$); + CREATE OR REPLACE FUNCTION run_commands_on_session_level_connection_to_node(text) RETURNS void LANGUAGE C STRICT VOLATILE diff --git 
a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index 0defcf549..fa2079ba5 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -93,7 +93,8 @@ step "s2-view-worker" FROM citus_worker_stat_activity WHERE query NOT ILIKE '%pg_prepared_xacts%' AND query NOT ILIKE '%COMMIT%' AND - query NOT ILIKE '%dump_local_wait_edges%' + query NOT ILIKE '%dump_local_%' AND + query NOT ILIKE '%citus_internal_local_blocked_processes%' ORDER BY query, query_hostport DESC; } diff --git a/src/test/regress/sql/aggregate_support.sql b/src/test/regress/sql/aggregate_support.sql index dc7215f79..7c82418a7 100644 --- a/src/test/regress/sql/aggregate_support.sql +++ b/src/test/regress/sql/aggregate_support.sql @@ -459,6 +459,7 @@ RETURN $1 * $1; END; $function$; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION square_func(int) RETURNS int LANGUAGE plpgsql @@ -467,6 +468,7 @@ BEGIN RETURN $1 * $1; END; $function$; +RESET citus.enable_metadata_sync; SELECT const_function(1), string_agg(a::character, ',') FROM t1; SELECT const_function(1), count(b) FROM t1; diff --git a/src/test/regress/sql/citus_local_tables_mx.sql b/src/test/regress/sql/citus_local_tables_mx.sql index bc6f84feb..58745ef1c 100644 --- a/src/test/regress/sql/citus_local_tables_mx.sql +++ b/src/test/regress/sql/citus_local_tables_mx.sql @@ -382,6 +382,36 @@ $$ SELECT count(*) FROM pg_catalog.pg_tables WHERE tablename='citus_local_table_4' $$); +-- verify that partitioned citus local tables with dropped columns can be distributed. 
issue: #5577 +CREATE TABLE parent_dropped_col(a int, eventtime date) PARTITION BY RANGE ( eventtime); +SELECT citus_add_local_table_to_metadata('parent_dropped_col'); +ALTER TABLE parent_dropped_col DROP column a; +CREATE TABLE parent_dropped_col_1 PARTITION OF parent_dropped_col for VALUES FROM ('2000-01-01') TO ('2001-01-01'); +SELECT create_distributed_table('parent_dropped_col', 'eventtime'); +-- another example to test +CREATE TABLE parent_dropped_col_2( + col_to_drop_0 text, + col_to_drop_1 text, + col_to_drop_2 date, + col_to_drop_3 inet, + col_to_drop_4 date, + measureid integer, + eventdatetime date, + measure_data jsonb, + PRIMARY KEY (measureid, eventdatetime, measure_data)) + PARTITION BY RANGE(eventdatetime); + +select citus_add_local_table_to_metadata('parent_dropped_col_2'); +ALTER TABLE parent_dropped_col_2 DROP COLUMN col_to_drop_1; +CREATE TABLE parent_dropped_col_2_2000 PARTITION OF parent_dropped_col_2 FOR VALUES FROM ('2000-01-01') TO ('2001-01-01'); + +SELECT create_distributed_table('parent_dropped_col_2', 'measureid'); + +-- verify that the partitioned tables are distributed with the correct distribution column +SELECT logicalrelid, partmethod, partkey FROM pg_dist_partition + WHERE logicalrelid IN ('parent_dropped_col'::regclass, 'parent_dropped_col_2'::regclass) + ORDER BY logicalrelid; + -- cleanup at exit set client_min_messages to error; DROP SCHEMA citus_local_tables_mx CASCADE; diff --git a/src/test/regress/sql/citus_local_tables_queries_mx.sql b/src/test/regress/sql/citus_local_tables_queries_mx.sql index f4a53bffc..ddcc95d84 100644 --- a/src/test/regress/sql/citus_local_tables_queries_mx.sql +++ b/src/test/regress/sql/citus_local_tables_queries_mx.sql @@ -43,6 +43,7 @@ CREATE TABLE postgres_local_table(a int, b int); -- We shouldn't use LIMIT in INSERT SELECT queries to make the test faster as -- LIMIT would force planner to wrap SELECT query in an intermediate result and -- this might reduce the coverage of the test cases. 
+SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ BEGIN SET client_min_messages to ERROR; @@ -59,6 +60,7 @@ CREATE FUNCTION clear_and_init_test_tables() RETURNS void AS $$ RESET client_min_messages; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; ---------------- ---- SELECT ---- diff --git a/src/test/regress/sql/coordinator_evaluation.sql b/src/test/regress/sql/coordinator_evaluation.sql index e7479fd1a..1f313a76a 100644 --- a/src/test/regress/sql/coordinator_evaluation.sql +++ b/src/test/regress/sql/coordinator_evaluation.sql @@ -165,6 +165,7 @@ BEGIN END; $$ LANGUAGE 'plpgsql' STRICT; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE coordinator_evaluation.test_procedure(int) LANGUAGE plpgsql AS $procedure$ @@ -174,6 +175,7 @@ BEGIN PERFORM DISTINCT value FROM coordinator_evaluation_table_2 WHERE key = filterKey; END; $procedure$; +RESET citus.enable_metadata_sync; -- we couldn't find a meaningful query to write for this -- however this query fails before https://github.com/citusdata/citus/pull/3454 diff --git a/src/test/regress/sql/coordinator_evaluation_modify.sql b/src/test/regress/sql/coordinator_evaluation_modify.sql index e4f104ad9..a4c9f75b5 100644 --- a/src/test/regress/sql/coordinator_evaluation_modify.sql +++ b/src/test/regress/sql/coordinator_evaluation_modify.sql @@ -275,11 +275,13 @@ SET citus.log_local_commands TO ON; SET search_path TO coordinator_evaluation_combinations_modify; -- returns 2 on the worker +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_constant_stable() RETURNS INT AS $$ BEGIN RETURN 2; END; $$ language plpgsql STABLE; +RESET citus.enable_metadata_sync; -- all local values diff --git a/src/test/regress/sql/coordinator_shouldhaveshards.sql b/src/test/regress/sql/coordinator_shouldhaveshards.sql index 159a42d6c..bc79513c3 100644 --- a/src/test/regress/sql/coordinator_shouldhaveshards.sql +++ 
b/src/test/regress/sql/coordinator_shouldhaveshards.sql @@ -373,9 +373,11 @@ inserts AS ( -- a helper function which return true if the coordinated -- trannsaction uses 2PC +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION coordinated_transaction_should_use_2PC() RETURNS BOOL LANGUAGE C STRICT VOLATILE AS 'citus', $$coordinated_transaction_should_use_2PC$$; +RESET citus.enable_metadata_sync; -- a local SELECT followed by remote SELECTs -- does not trigger 2PC diff --git a/src/test/regress/sql/distributed_collations.sql b/src/test/regress/sql/distributed_collations.sql index 8b2bffc7e..669577a09 100644 --- a/src/test/regress/sql/distributed_collations.sql +++ b/src/test/regress/sql/distributed_collations.sql @@ -93,3 +93,19 @@ DROP SCHEMA collation_tests CASCADE; DROP SCHEMA collation_tests2 CASCADE; DROP USER collationuser; SELECT run_command_on_workers($$DROP USER collationuser;$$); + +\c - - - :worker_1_port +-- test creating a collation on a worker +CREATE COLLATION another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); + +-- test if creating a collation on a worker on a local +-- schema raises the right error +SET citus.enable_ddl_propagation TO off; +CREATE SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; + +CREATE COLLATION collation_creation_on_worker.another_german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); + +SET citus.enable_ddl_propagation TO off; +DROP SCHEMA collation_creation_on_worker; +SET citus.enable_ddl_propagation TO on; diff --git a/src/test/regress/sql/distributed_functions_conflict.sql b/src/test/regress/sql/distributed_functions_conflict.sql index b4877880f..cee80548b 100644 --- a/src/test/regress/sql/distributed_functions_conflict.sql +++ b/src/test/regress/sql/distributed_functions_conflict.sql @@ -5,6 +5,7 @@ CREATE SCHEMA proc_conflict; \c - - - :worker_1_port SET search_path TO proc_conflict; +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION 
existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 2 + i; @@ -14,6 +15,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; @@ -58,7 +60,7 @@ DROP FUNCTION existing_func(int, int) CASCADE; \c - - - :worker_1_port SET search_path TO proc_conflict; - +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION existing_func(state int, i int) RETURNS int AS $$ BEGIN RETURN state * 3 + i; @@ -68,6 +70,7 @@ CREATE AGGREGATE existing_agg(int) ( SFUNC = existing_func, STYPE = int ); +RESET citus.enable_metadata_sync; \c - - - :master_port SET search_path TO proc_conflict; diff --git a/src/test/regress/sql/follower_single_node.sql b/src/test/regress/sql/follower_single_node.sql index 4cd7d9d0a..482c0b575 100644 --- a/src/test/regress/sql/follower_single_node.sql +++ b/src/test/regress/sql/follower_single_node.sql @@ -107,10 +107,10 @@ SELECT * FROM test WHERE x = 1; \c -reuse-previous=off regression - - :master_port SET search_path TO single_node; -SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary'); +SELECT 1 FROM master_add_node('localhost', :follower_master_port, groupid => 0, noderole => 'secondary', nodecluster => 'second-cluster'); SELECT 1 FROM master_set_node_property('localhost', :master_port, 'shouldhaveshards', true); -\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" +\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" SET search_path TO single_node; SELECT * FROM test WHERE x = 1; @@ -169,6 +169,29 @@ INSERT INTO columnar_test(a, b) VALUES (1, 8); \c - - - :follower_master_port SELECT * FROM columnar_test ORDER BY 1,2; + +\c -reuse-previous=off regression - - :master_port +SET citus.shard_replication_factor TO 1; +SET search_path TO single_node; + +CREATE TABLE dist_table (a INT, b 
INT); +SELECT create_distributed_table ('dist_table', 'a', shard_count:=4); +INSERT INTO dist_table VALUES (1, 1); + +\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" +SET search_path TO single_node; + +SELECT * FROM dist_table; + +SELECT global_pid AS follower_coordinator_gpid FROM get_all_active_transactions() WHERE process_id = pg_backend_pid() \gset +SELECT pg_typeof(:follower_coordinator_gpid); + +SELECT pg_cancel_backend(:follower_coordinator_gpid); + +SET citus.log_remote_commands TO ON; +SELECT pg_cancel_backend(:follower_coordinator_gpid) FROM dist_table WHERE a = 1; + + -- Cleanup \c -reuse-previous=off regression - - :master_port SET search_path TO single_node; diff --git a/src/test/regress/sql/forcedelegation_functions.sql b/src/test/regress/sql/forcedelegation_functions.sql index 6f62c41d5..d95cfc75a 100644 --- a/src/test/regress/sql/forcedelegation_functions.sql +++ b/src/test/regress/sql/forcedelegation_functions.sql @@ -189,12 +189,16 @@ COMMIT; SELECT func_calls_forcepush_func(); +-- Block distributing that function as distributing it causes +-- different test output on PG 14. 
+SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_val() RETURNS INT AS $$ BEGIN RETURN 100::INT; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- -- UDF calling another UDF in a FROM clause @@ -683,6 +687,250 @@ BEGIN END; $$ LANGUAGE plpgsql; +CREATE TABLE testnested_table (x int, y int); +SELECT create_distributed_table('testnested_table','x'); + +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; + +-- Non-force function calling force-delegation function +CREATE OR REPLACE FUNCTION outer_local_fn() +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM inner_fn(1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (2,3); + PERFORM 1 FROM inner_fn(4); + INSERT INTO forcepushdown_schema.testnested_table VALUES (5,6); +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function('inner_fn(int)','x', + colocate_with:='testnested_table', force_delegation := true); + +SELECT outer_local_fn(); +-- Rows from 1-6 should appear +SELECT * FROM testnested_table ORDER BY 1; + +BEGIN; +SELECT outer_local_fn(); +END; +SELECT * FROM testnested_table ORDER BY 1; + +DROP FUNCTION inner_fn(int); +DROP FUNCTION outer_local_fn(); +TRUNCATE TABLE testnested_table; + +CREATE OR REPLACE FUNCTION inner_fn(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; + +-- Force-delegation function calling non-force function +CREATE OR REPLACE FUNCTION outer_fn(y int, z int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM 1 FROM forcepushdown_schema.inner_fn(y); + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM 1 FROM forcepushdown_schema.inner_fn(z); + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function('inner_fn(int)','x', + 
colocate_with:='testnested_table', force_delegation := false); +SELECT create_distributed_function('outer_fn(int, int)','y', + colocate_with:='testnested_table', force_delegation := true); + +SELECT outer_fn(1, 2); +BEGIN; +SELECT outer_fn(1, 2); +END; + +-- No rows +SELECT * FROM testnested_table ORDER BY 1; + +-- Force-delegation function calling force-delegation function +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_inner(x+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); + +-- Keys 7,8,9,14 fall on one node and 15 on a different node + +-- Function gets delegated to node with shard-key = 7 and inner function +-- will not be delegated but inserts shard-key = 8 locally +SELECT force_push_outer(7); + +BEGIN; +-- Function gets delegated to node with shard-key = 8 and inner function +-- will not be delegated but inserts shard-key = 9 locally +SELECT force_push_outer(8); +END; + +BEGIN; +-- Function gets delegated to node with shard-key = 14 and inner function +-- will not be delegated but fails to insert shard-key = 15 remotely +SELECT force_push_outer(14); +END; +SELECT * FROM testnested_table ORDER BY 1; + +-- +-- Function-1() --> function-2() --> function-3() +-- +CREATE OR REPLACE FUNCTION force_push_1(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); + PERFORM forcepushdown_schema.force_push_2(x+1) LIMIT 1; 
+END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION force_push_2(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); + PERFORM forcepushdown_schema.force_push_3(y+1) LIMIT 1; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION force_push_3(z int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (z,z); +END; +$$ LANGUAGE plpgsql; + +SELECT create_distributed_function( + 'force_push_1(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_2(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_3(int)', 'z', + colocate_with := 'testnested_table', + force_delegation := true +); + +TRUNCATE TABLE testnested_table; +BEGIN; +-- All local inserts +SELECT force_push_1(7); +END; + +BEGIN; +-- Local(shard-keys 13, 15) + remote insert (shard-key 14) +SELECT force_push_1(13); +END; + +SELECT * FROM testnested_table ORDER BY 1; + +TRUNCATE TABLE testnested_table; +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ +DECLARE +BEGIN + INSERT INTO forcepushdown_schema.testnested_table VALUES (y,y); +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x+1,x+1); +END; +$$ LANGUAGE plpgsql; +SELECT create_distributed_function( + 'force_push_inner(int)', 'y', + colocate_with := 'testnested_table', + force_delegation := true +); +SELECT create_distributed_function( + 'force_push_outer(int)', 'x', + colocate_with := 'testnested_table', + force_delegation := true +); + +BEGIN; +SELECT force_push_outer(7); +END; +TABLE testnested_table ORDER BY 1; + +CREATE OR REPLACE FUNCTION force_push_inner(y int) +RETURNS void +AS $$ 
+DECLARE +BEGIN + RAISE NOTICE '%', y; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION force_push_outer(x int) +RETURNS void +AS $$ +DECLARE +BEGIN + PERFORM FROM forcepushdown_schema.force_push_inner(x+1); + INSERT INTO forcepushdown_schema.testnested_table VALUES (x,x); +END; +$$ LANGUAGE plpgsql; + +BEGIN; +SELECT force_push_outer(9); +END; +TABLE testnested_table ORDER BY 1; + RESET client_min_messages; SET citus.log_remote_commands TO off; DROP SCHEMA forcepushdown_schema CASCADE; diff --git a/src/test/regress/sql/function_propagation.sql b/src/test/regress/sql/function_propagation.sql new file mode 100644 index 000000000..79168497b --- /dev/null +++ b/src/test/regress/sql/function_propagation.sql @@ -0,0 +1,193 @@ +CREATE SCHEMA function_propagation_schema; +SET search_path TO 'function_propagation_schema'; + +-- Check whether supported dependencies can be distributed while propagating functions + +-- Check types +SET citus.enable_metadata_sync TO OFF; + CREATE TYPE function_prop_type AS (a int, b int); +RESET citus.enable_metadata_sync; + +CREATE OR REPLACE FUNCTION func_1(param_1 function_prop_type) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +-- Check all dependent objects and function depends on all nodes +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema'::regnamespace::oid;$$) ORDER BY 1,2; +SELECT * FROM 
run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type'::regtype::oid;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_1'::regproc::oid;$$) ORDER BY 1,2; + +SET citus.enable_metadata_sync TO OFF; + CREATE TYPE function_prop_type_2 AS (a int, b int); +RESET citus.enable_metadata_sync; + +CREATE OR REPLACE FUNCTION func_2(param_1 int) +RETURNS function_prop_type_2 +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_2'::regtype::oid;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_2'::regproc::oid;$$) ORDER BY 1,2; + +-- Have a separate check for type created in transaction +BEGIN; + CREATE TYPE function_prop_type_3 AS (a int, b int); +COMMIT; + +-- Objects in the body part is not found as dependency +CREATE OR REPLACE FUNCTION func_3(param_1 int) +RETURNS int +LANGUAGE plpgsql AS +$$ +DECLARE + internal_param1 function_prop_type_3; +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.function_prop_type_3'::regtype::oid; +SELECT 
pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_3'::regproc::oid;$$) ORDER BY 1,2; + +-- Check table +CREATE TABLE function_prop_table(a int, b int); + +-- Non-distributed table is not distributed as dependency +CREATE OR REPLACE FUNCTION func_4(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +CREATE OR REPLACE FUNCTION func_5(param_1 int) +RETURNS function_prop_table +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +-- Functions can be created with distributed table dependency +SELECT create_distributed_table('function_prop_table', 'a'); +CREATE OR REPLACE FUNCTION func_6(param_1 function_prop_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_6'::regproc::oid;$$) ORDER BY 1,2; + +-- Views are not supported +CREATE VIEW function_prop_view AS SELECT * FROM function_prop_table; +CREATE OR REPLACE FUNCTION func_7(param_1 function_prop_view) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +CREATE OR REPLACE FUNCTION func_8(param_1 int) +RETURNS function_prop_view +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +-- Check within transaction +BEGIN; + CREATE TYPE type_in_transaction AS (a int, b int); + CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) + RETURNS int + LANGUAGE plpgsql AS + $$ + BEGIN + return 1; + END; + $$; + + -- 
Within transaction functions are not distributed + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; + SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; +COMMIT; + +-- Show that recreating it outside transaction distributes the function and dependencies +CREATE OR REPLACE FUNCTION func_in_transaction(param_1 type_in_transaction) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regtype::oid;$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.func_in_transaction'::regproc::oid;$$) ORDER BY 1,2; + +-- Test for SQL function with unsupported object in function body +CREATE TABLE table_in_sql_body(id int); + +CREATE FUNCTION max_of_table() +RETURNS int +LANGUAGE SQL AS +$$ + SELECT max(id) FROM table_in_sql_body +$$; + +-- Show that only function has propagated, since the table is not resolved as dependency +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.type_in_transaction'::regclass::oid; +SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object 
where objid = 'function_propagation_schema.max_of_table'::regproc::oid; +SELECT * FROM run_command_on_workers($$SELECT pg_identify_object_as_address(classid, objid, objsubid) from citus.pg_dist_object where objid = 'function_propagation_schema.max_of_table'::regproc::oid;$$) ORDER BY 1,2; + +-- Check extension owned table +CREATE TABLE extension_owned_table(a int); +SELECT run_command_on_workers($$ +CREATE TABLE function_propagation_schema.extension_owned_table(a int); +$$ +); +CREATE EXTENSION seg; +ALTER EXTENSION seg ADD TABLE extension_owned_table; +SELECT run_command_on_workers($$ +ALTER EXTENSION seg ADD TABLE function_propagation_schema.extension_owned_table; +$$); + +CREATE OR REPLACE FUNCTION func_for_ext_check(param_1 extension_owned_table) +RETURNS int +LANGUAGE plpgsql AS +$$ +BEGIN + return 1; +END; +$$; + +RESET search_path; +SET client_min_messages TO WARNING; +DROP SCHEMA function_propagation_schema CASCADE; diff --git a/src/test/regress/sql/global_cancel.sql b/src/test/regress/sql/global_cancel.sql new file mode 100644 index 000000000..edf380771 --- /dev/null +++ b/src/test/regress/sql/global_cancel.sql @@ -0,0 +1,51 @@ +CREATE SCHEMA global_cancel; +SET search_path TO global_cancel; +SET citus.next_shard_id TO 56789000; + +CREATE TABLE dist_table (a INT, b INT); +SELECT create_distributed_table ('dist_table', 'a', shard_count:=4); +INSERT INTO dist_table VALUES (1, 1); + +SELECT global_pid AS coordinator_gpid FROM get_all_active_transactions() WHERE process_id = pg_backend_pid() \gset +SELECT pg_typeof(:coordinator_gpid); + +SELECT pg_cancel_backend(:coordinator_gpid); + +SET citus.log_remote_commands TO ON; +SELECT pg_cancel_backend(:coordinator_gpid) FROM dist_table WHERE a = 1; + +BEGIN; +SELECT pg_cancel_backend(:coordinator_gpid) FROM dist_table WHERE a = 1; +END; + +SET citus.log_remote_commands TO OFF; + +SELECT global_pid AS maintenance_daemon_gpid +FROM pg_stat_activity psa JOIN get_all_active_transactions() gaat ON psa.pid = 
gaat.process_id +WHERE application_name = 'Citus Maintenance Daemon' \gset + +SET client_min_messages TO ERROR; +CREATE USER global_cancel_user; +SELECT 1 FROM run_command_on_workers('CREATE USER global_cancel_user'); +RESET client_min_messages; + +SET ROLE global_cancel_user; + +SELECT pg_typeof(:maintenance_daemon_gpid); + +SELECT pg_cancel_backend(:maintenance_daemon_gpid); +SELECT pg_terminate_backend(:maintenance_daemon_gpid); + +RESET ROLE; + +SELECT nodeid AS coordinator_node_id FROM pg_dist_node WHERE nodeport = :master_port \gset + +SET client_min_messages TO DEBUG; + +-- 10000000000 is the node id multiplier for global pid +SELECT pg_cancel_backend(10000000000 * :coordinator_node_id + 0); +SELECT pg_terminate_backend(10000000000 * :coordinator_node_id + 0); + +RESET client_min_messages; + +DROP SCHEMA global_cancel CASCADE; diff --git a/src/test/regress/sql/local_shard_copy.sql b/src/test/regress/sql/local_shard_copy.sql index 688ba8b4b..094ba8fb4 100644 --- a/src/test/regress/sql/local_shard_copy.sql +++ b/src/test/regress/sql/local_shard_copy.sql @@ -48,6 +48,7 @@ SET citus.log_local_commands TO ON; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -70,6 +71,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- pick some example values that reside on the shards locally and remote diff --git a/src/test/regress/sql/local_shard_execution.sql b/src/test/regress/sql/local_shard_execution.sql index 6bda63d8d..c7dad6f00 100644 --- a/src/test/regress/sql/local_shard_execution.sql +++ b/src/test/regress/sql/local_shard_execution.sql @@ -71,6 +71,7 @@ SET 
search_path TO local_shard_execution; -- returns true of the distribution key filter -- on the distributed tables (e.g., WHERE key = 1), we'll hit a shard -- placement which is local to this not +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) RETURNS bool AS $$ DECLARE shard_is_local BOOLEAN := FALSE; @@ -93,6 +94,7 @@ CREATE OR REPLACE FUNCTION shard_of_distribution_column_is_local(dist_key int) R RETURN shard_is_local; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- test case for issue #3556 SET citus.log_intermediate_results TO TRUE; @@ -418,6 +420,7 @@ BEGIN; ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; BEGIN @@ -491,6 +494,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); diff --git a/src/test/regress/sql/local_shard_execution_replicated.sql b/src/test/regress/sql/local_shard_execution_replicated.sql index a341ac619..01af172a3 100644 --- a/src/test/regress/sql/local_shard_execution_replicated.sql +++ b/src/test/regress/sql/local_shard_execution_replicated.sql @@ -384,6 +384,7 @@ BEGIN; ROLLBACK; -- make sure that functions can use local execution +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE PROCEDURE only_local_execution() AS $$ DECLARE cnt INT; BEGIN @@ -457,6 +458,7 @@ CREATE OR REPLACE PROCEDURE local_execution_followed_by_dist() AS $$ SELECT count(*) INTO cnt FROM distributed_table; END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CALL local_execution_followed_by_dist(); diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 1ab302ba2..cf123ff49 100644 --- 
a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -618,6 +618,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ROLLBACK; -- create a volatile function that returns the local node id +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION get_node_id() RETURNS INT AS $$ DECLARE localGroupId int; @@ -630,6 +631,7 @@ BEGIN nodeport = 57637 AND nodename = 'localhost' AND isactive AND nodecluster = 'default'; RETURN localGroupId; END; $$ language plpgsql; +RESET citus.enable_metadata_sync; -- fails because we ingest more placements for the same shards to the same worker node BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; diff --git a/src/test/regress/sql/multi_create_table.sql b/src/test/regress/sql/multi_create_table.sql index 4c318e73a..4a1070514 100644 --- a/src/test/regress/sql/multi_create_table.sql +++ b/src/test/regress/sql/multi_create_table.sql @@ -10,6 +10,7 @@ SET citus.next_shard_id TO 360000; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -27,6 +28,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE lineitem ( l_orderkey bigint not null, diff --git a/src/test/regress/sql/multi_deparse_function.sql b/src/test/regress/sql/multi_deparse_function.sql index 1574ff22f..2b8450b4f 100644 --- a/src/test/regress/sql/multi_deparse_function.sql +++ b/src/test/regress/sql/multi_deparse_function.sql @@ -359,10 +359,7 @@ SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_custom_param(intpair)'); RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION 
func_custom_param RENAME TO func_with_custom_param; -$cmd$); - -- a function that returns TABLE CREATE FUNCTION func_returns_table(IN count INT) @@ -373,9 +370,7 @@ SET citus.enable_metadata_sync TO OFF; SELECT create_distributed_function('func_returns_table(INT)'); RESET citus.enable_metadata_sync; -SELECT deparse_and_run_on_workers($cmd$ ALTER FUNCTION func_returns_table ROWS 100; -$cmd$); -- clear objects SET client_min_messages TO WARNING; -- suppress cascading objects dropping diff --git a/src/test/regress/sql/multi_extension.sql b/src/test/regress/sql/multi_extension.sql index 43f24fb10..aaed45803 100644 --- a/src/test/regress/sql/multi_extension.sql +++ b/src/test/regress/sql/multi_extension.sql @@ -105,6 +105,8 @@ SET citus.enable_metadata_sync TO 'false'; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; + CREATE EXTENSION citus VERSION '8.0-1'; ALTER EXTENSION citus UPDATE TO '8.0-2'; ALTER EXTENSION citus UPDATE TO '8.0-3'; @@ -303,6 +305,7 @@ SELECT * FROM multi_extension.print_extension_changes(); \set VERBOSITY terse CREATE TABLE columnar_table(a INT, b INT) USING columnar; SET citus.enable_version_checks TO ON; +SET columnar.enable_version_checks TO ON; -- all should throw an error due to version mismatch VACUUM FULL columnar_table; @@ -324,6 +327,7 @@ CREATE TABLE new_columnar_table (a int) USING columnar; -- do cleanup for the rest of the tests SET citus.enable_version_checks TO OFF; +SET columnar.enable_version_checks TO OFF; DROP TABLE columnar_table; RESET columnar.enable_custom_scan; \set VERBOSITY default @@ -472,13 +476,16 @@ ORDER BY 1, 2; -- see incompatible version errors out RESET citus.enable_version_checks; +RESET columnar.enable_version_checks; DROP EXTENSION citus; CREATE EXTENSION citus VERSION '8.0-1'; -- Test non-distributed queries work even in version mismatch SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION 
'8.1-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- Test CREATE TABLE CREATE TABLE version_mismatch_table(column1 int); @@ -517,14 +524,17 @@ END; $function$; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; -- This will fail because of previous function declaration ALTER EXTENSION citus UPDATE TO '8.1-1'; -- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; DROP FUNCTION pg_catalog.relation_is_a_known_shard(regclass); SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; ALTER EXTENSION citus UPDATE TO '8.1-1'; -- Test updating to the latest version without specifying the version number @@ -540,8 +550,10 @@ CREATE EXTENSION citus; DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; +SET columnar.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '8.0-1'; SET citus.enable_version_checks TO 'true'; +SET columnar.enable_version_checks TO 'true'; -- during ALTER EXTENSION, we should invalidate the cache ALTER EXTENSION citus UPDATE; diff --git a/src/test/regress/sql/multi_function_in_join.sql b/src/test/regress/sql/multi_function_in_join.sql index 6abdafdad..1e2844f4e 100644 --- a/src/test/regress/sql/multi_function_in_join.sql +++ b/src/test/regress/sql/multi_function_in_join.sql @@ -33,7 +33,6 @@ SELECT * FROM table1 JOIN nextval('numbers') n ON (id = n) ORDER BY id ASC; CREATE FUNCTION add(integer, integer) RETURNS integer AS 'SELECT $1 + $2;' LANGUAGE SQL; -SELECT create_distributed_function('add(integer,integer)'); SELECT * FROM table1 JOIN add(3,5) sum ON (id = sum) ORDER BY id ASC; -- Check join of plpgsql functions @@ -46,6 +45,8 @@ $$ LANGUAGE plpgsql; SELECT * FROM table1 JOIN increment(2) val ON (id = val) ORDER BY id ASC; -- a function that returns a set of integers 
+-- Block distributing function as we have tests below to test it locally +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION next_k_integers(IN first_value INTEGER, IN k INTEGER DEFAULT 3, OUT result INTEGER) @@ -54,6 +55,7 @@ BEGIN RETURN QUERY SELECT x FROM generate_series(first_value, first_value+k-1) f(x); END; $$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT * FROM table1 JOIN next_k_integers(3,2) next_integers ON (id = next_integers.result) ORDER BY id ASC; @@ -124,6 +126,7 @@ SET client_min_messages TO ERROR; -- function joins in CTE results can create lateral joins that are not supported -- we execute the query within a function to consolidate the error messages -- between different executors +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION raise_failed_execution_func_join(query text) RETURNS void AS $$ BEGIN EXECUTE query; @@ -135,6 +138,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ WITH one_row AS ( @@ -146,8 +150,10 @@ SELECT raise_failed_execution_func_join($$ $$); -- a user-defined immutable function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION the_answer_to_life() RETURNS INTEGER IMMUTABLE AS 'SELECT 42' LANGUAGE SQL; +RESET citus.enable_metadata_sync; SELECT raise_failed_execution_func_join($$ SELECT * FROM table1 JOIN the_answer_to_life() the_answer ON (id = the_answer); diff --git a/src/test/regress/sql/multi_insert_select.sql b/src/test/regress/sql/multi_insert_select.sql index 7de9f9763..92c802fe1 100644 --- a/src/test/regress/sql/multi_insert_select.sql +++ b/src/test/regress/sql/multi_insert_select.sql @@ -106,6 +106,7 @@ WHERE user_id < 0; -- make sure we don't evaluate stable functions with column arguments +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION evaluate_on_master(x int) RETURNS int LANGUAGE plpgsql STABLE AS $function$ @@ -114,6 +115,7 @@ BEGIN RETURN x; END; $function$; +RESET 
citus.enable_metadata_sync; INSERT INTO raw_events_second (user_id, value_1) SELECT diff --git a/src/test/regress/sql/multi_metadata_sync.sql b/src/test/regress/sql/multi_metadata_sync.sql index d85bede5f..13c984396 100644 --- a/src/test/regress/sql/multi_metadata_sync.sql +++ b/src/test/regress/sql/multi_metadata_sync.sql @@ -20,10 +20,12 @@ SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION activate_node_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION activate_node_snapshot() IS 'commands to activate node snapshot'; @@ -43,12 +45,14 @@ ALTER ROLE CURRENT_USER WITH PASSWORD 'dummypassword'; SELECT unnest(activate_node_snapshot()) order by 1; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) RETURNS void LANGUAGE C STRICT AS 'citus', $$master_create_distributed_table$$; +RESET citus.enable_metadata_sync; COMMENT ON FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) diff --git a/src/test/regress/sql/multi_modifications.sql b/src/test/regress/sql/multi_modifications.sql index 55453e78b..7977325ea 100644 --- a/src/test/regress/sql/multi_modifications.sql +++ b/src/test/regress/sql/multi_modifications.sql @@ -363,16 +363,6 @@ UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246; CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -\c - - - :worker_1_port -CREATE FUNCTION immutable_append(old_values 
int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; - -\c - - - :worker_2_port -CREATE FUNCTION immutable_append(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; - -\c - - - :master_port - -- immutable function calls with vars are also allowed UPDATE limit_orders SET array_of_values = immutable_append(array_of_values, 2) WHERE id = 246; diff --git a/src/test/regress/sql/multi_modifying_xacts.sql b/src/test/regress/sql/multi_modifying_xacts.sql index a3b004546..a53153b9b 100644 --- a/src/test/regress/sql/multi_modifying_xacts.sql +++ b/src/test/regress/sql/multi_modifying_xacts.sql @@ -282,18 +282,16 @@ SELECT * FROM researchers WHERE lab_id = 6; -- verify 2pc SELECT count(*) FROM pg_dist_transaction; - -- create a check function -SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ +CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ BEGIN IF (NEW.id > 30) THEN - RAISE ''illegal value''; + RAISE 'illegal value'; END IF; RETURN NEW; END; -$rli$ LANGUAGE plpgsql;') -ORDER BY nodeport; +$rli$ LANGUAGE plpgsql; -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') @@ -344,9 +342,7 @@ SELECT * FROM researchers WHERE lab_id = 6; SELECT * from run_command_on_placements('researchers', 'drop trigger reject_large_researcher_id on %s') ORDER BY nodeport, shardid; -SELECT * FROM run_command_on_workers('drop function reject_large_id()') -ORDER BY nodeport; - +DROP FUNCTION reject_large_id(); -- ALTER and copy are compatible BEGIN; ALTER TABLE labs ADD COLUMN motto text; @@ -416,6 +412,7 @@ AND s.logicalrelid = 'objects'::regclass; -- create trigger on one worker to reject certain values \c - - - :worker_2_port +SET 
citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -425,6 +422,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 @@ -460,6 +458,7 @@ DELETE FROM objects; -- there cannot be errors on different shards at different times -- because the first failure will fail the whole transaction \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -469,6 +468,7 @@ CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 @@ -668,7 +668,7 @@ SELECT * FROM reference_modifying_xacts; -- lets fail on of the workers at before the commit time \c - - - :worker_1_port - +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 999) THEN @@ -678,6 +678,7 @@ CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 @@ -762,7 +763,7 @@ ABORT; -- lets fail one of the workers before COMMIT time for the hash table \c - - - :worker_1_port - +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 997) THEN @@ -772,6 +773,7 @@ CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 diff --git a/src/test/regress/sql/multi_multiuser.sql b/src/test/regress/sql/multi_multiuser.sql index 
d22fae1ec..f379c75f1 100644 --- a/src/test/regress/sql/multi_multiuser.sql +++ b/src/test/regress/sql/multi_multiuser.sql @@ -369,21 +369,24 @@ RESET ROLE; -- to test access to files created during repartition we will create some on worker 1 \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET ROLE full_access; SELECT worker_hash_partition_table(42,1,'SELECT a FROM generate_series(1,100) AS a', 'a', 23, ARRAY[-2147483648, -1073741824, 0, 1073741824]::int4[]); RESET ROLE; -- all attempts for transfer are initiated from other workers \c - - - :worker_2_port - +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION citus_rm_job_directory(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- super user should not be able to copy files created by a user SELECT worker_fetch_partition_file(42, 1, 1, 1, 'localhost', :worker_1_port); diff --git a/src/test/regress/sql/multi_mx_call.sql b/src/test/regress/sql/multi_mx_call.sql index 4728b8948..ccdffd9aa 100644 --- a/src/test/regress/sql/multi_mx_call.sql +++ b/src/test/regress/sql/multi_mx_call.sql @@ -180,6 +180,7 @@ select colocate_proc_with_table('mx_call_proc', 'mx_call_dist_table_1'::regclass -- Test that we handle transactional constructs correctly inside a procedure -- that is routed to the workers. +SET citus.enable_metadata_sync TO OFF; CREATE PROCEDURE mx_call_proc_tx(x int) LANGUAGE plpgsql AS $$ BEGIN INSERT INTO multi_mx_call.mx_call_dist_table_1 VALUES (x, -1), (x+1, 4); @@ -189,6 +190,7 @@ BEGIN -- Now do the final update! UPDATE multi_mx_call.mx_call_dist_table_1 SET val = val-1 WHERE id >= x; END;$$; +RESET citus.enable_metadata_sync; -- before distribution ... 
CALL multi_mx_call.mx_call_proc_tx(10); diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql index 4aa5a0164..0a685e5ce 100644 --- a/src/test/regress/sql/multi_mx_create_table.sql +++ b/src/test/regress/sql/multi_mx_create_table.sql @@ -63,33 +63,7 @@ CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); --- create UDFs in worker node -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; - -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, @@ -103,32 +77,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- now create required stuff in the worker 2 \c - - - :worker_2_port --- create UDF -CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - SET search_path TO citus_mx_test_schema; -CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) -RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( diff --git 
a/src/test/regress/sql/multi_mx_explain.sql b/src/test/regress/sql/multi_mx_explain.sql index 9417b94f3..347d79d67 100644 --- a/src/test/regress/sql/multi_mx_explain.sql +++ b/src/test/regress/sql/multi_mx_explain.sql @@ -18,6 +18,7 @@ VACUUM ANALYZE supplier_mx; \c - - - :worker_1_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ @@ -43,6 +44,7 @@ $BODY$ LANGUAGE plpgsql; \c - - - :worker_2_port -- Function that parses explain output as JSON +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ @@ -65,6 +67,7 @@ BEGIN RETURN result; END; $BODY$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; -- Test Text format diff --git a/src/test/regress/sql/multi_mx_metadata.sql b/src/test/regress/sql/multi_mx_metadata.sql index 7bd09b900..169f3aa8b 100644 --- a/src/test/regress/sql/multi_mx_metadata.sql +++ b/src/test/regress/sql/multi_mx_metadata.sql @@ -218,17 +218,6 @@ SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx \c - no_access_mx - :worker_1_port --- see the comment in the top of the file -CREATE OR REPLACE FUNCTION raise_failed_aclcheck(query text) RETURNS void AS $$ -BEGIN - EXECUTE query; - EXCEPTION WHEN OTHERS THEN - IF SQLERRM LIKE 'must be owner of%' THEN - RAISE 'must be owner of the object'; - END IF; -END; -$$LANGUAGE plpgsql; - SELECT raise_failed_aclcheck($$ DROP TABLE distributed_mx_table; $$); diff --git a/src/test/regress/sql/multi_mx_modifications.sql b/src/test/regress/sql/multi_mx_modifications.sql index 72ad17b65..852bf3a42 100644 --- a/src/test/regress/sql/multi_mx_modifications.sql +++ b/src/test/regress/sql/multi_mx_modifications.sql @@ -202,6 +202,7 @@ UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246 UPDATE limit_orders_mx SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246; +SET 
citus.enable_metadata_sync TO OFF; CREATE FUNCTION stable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; @@ -215,6 +216,8 @@ SELECT array_of_values FROM limit_orders_mx WHERE id = 246; -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT; +RESET citus.enable_metadata_sync; + UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; SELECT array_of_values FROM limit_orders_mx WHERE id = 246; diff --git a/src/test/regress/sql/multi_mx_modifying_xacts.sql b/src/test/regress/sql/multi_mx_modifying_xacts.sql index 2c7d65b7c..cf60f023d 100644 --- a/src/test/regress/sql/multi_mx_modifying_xacts.sql +++ b/src/test/regress/sql/multi_mx_modifying_xacts.sql @@ -195,6 +195,7 @@ SELECT * FROM objects_mx WHERE id = 1; -- create trigger on one worker to reject certain values \c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN @@ -204,6 +205,7 @@ CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ RETURN NEW; END; $rb$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 diff --git a/src/test/regress/sql/multi_mx_node_metadata.sql b/src/test/regress/sql/multi_mx_node_metadata.sql index 09e8e49f1..a54917f87 100644 --- a/src/test/regress/sql/multi_mx_node_metadata.sql +++ b/src/test/regress/sql/multi_mx_node_metadata.sql @@ -367,11 +367,12 @@ UPDATE pg_dist_node SET hasmetadata = true; SELECT master_update_node(nodeid, 'localhost', 12345) FROM pg_dist_node; +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION trigger_metadata_sync() RETURNS void LANGUAGE C STRICT AS 'citus'; - +RESET citus.enable_metadata_sync; SELECT trigger_metadata_sync(); \c :datname - - 
:master_port diff --git a/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql b/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql index a2663c7a4..779f4c24e 100644 --- a/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql +++ b/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql @@ -68,14 +68,7 @@ CREATE TABLE repartition_udt_other ( \c - - - :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -87,15 +80,6 @@ CREATE OPERATOR = ( -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -112,14 +96,7 @@ FUNCTION 1 test_udt_hash(test_udt); \c - - - :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -131,15 +108,6 @@ CREATE OPERATOR = ( -- ... 
and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/sql/multi_mx_router_planner.sql b/src/test/regress/sql/multi_mx_router_planner.sql index e8efbe244..fdfd81b07 100644 --- a/src/test/regress/sql/multi_mx_router_planner.sql +++ b/src/test/regress/sql/multi_mx_router_planner.sql @@ -609,6 +609,7 @@ PREPARE author_articles(int) as EXECUTE author_articles(1); -- queries inside plpgsql functions could be router plannable +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; @@ -635,6 +636,7 @@ END; $$ LANGUAGE plpgsql; SELECT * FROM author_articles_id_word_count(); +RESET citus.enable_metadata_sync; -- materialized views can be created for router plannable queries CREATE MATERIALIZED VIEW mv_articles_hash_mx AS diff --git a/src/test/regress/sql/multi_mx_schema_support.sql b/src/test/regress/sql/multi_mx_schema_support.sql index 555794f92..80cc61311 100644 --- a/src/test/regress/sql/multi_mx_schema_support.sql +++ b/src/test/regress/sql/multi_mx_schema_support.sql @@ -333,8 +333,25 @@ SELECT table_schema AS "Shards' Schema" FROM information_schema.tables WHERE table_name LIKE 'table\_set\_schema\_%' GROUP BY table_schema; + +-- check that we can drop a user-defined schema from workers +SET citus.enable_ddl_propagation TO OFF; +CREATE SCHEMA localschema; +RESET citus.enable_ddl_propagation; +DROP SCHEMA localschema; + \c - - - :master_port SELECT * FROM mx_new_schema.table_set_schema; +-- verify local 
schema does not exist on the worker +-- worker errors out as "schema does not exist" +SET citus.enable_ddl_propagation TO OFF; +CREATE SCHEMA localschema; + +-- should error out +SELECT run_command_on_workers($$DROP SCHEMA localschema;$$); + +RESET citus.enable_ddl_propagation; + DROP SCHEMA mx_old_schema CASCADE; DROP SCHEMA mx_new_schema CASCADE; diff --git a/src/test/regress/sql/multi_name_lengths.sql b/src/test/regress/sql/multi_name_lengths.sql index df0a68f33..34a509fad 100644 --- a/src/test/regress/sql/multi_name_lengths.sql +++ b/src/test/regress/sql/multi_name_lengths.sql @@ -7,6 +7,7 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000; SET citus.shard_count TO 2; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -24,6 +25,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; -- Verify that a table name > 56 characters gets hashed properly. 
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( diff --git a/src/test/regress/sql/multi_prepare_sql.sql b/src/test/regress/sql/multi_prepare_sql.sql index 47fad0021..54893d025 100644 --- a/src/test/regress/sql/multi_prepare_sql.sql +++ b/src/test/regress/sql/multi_prepare_sql.sql @@ -610,10 +610,6 @@ DROP TABLE http_request; -- first create helper function CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :master_port -- test table diff --git a/src/test/regress/sql/multi_repartition_udt.sql b/src/test/regress/sql/multi_repartition_udt.sql index c244a2b61..2e5c1de24 100644 --- a/src/test/regress/sql/multi_repartition_udt.sql +++ b/src/test/regress/sql/multi_repartition_udt.sql @@ -69,14 +69,7 @@ CREATE TABLE repartition_udt_other ( \c - - :public_worker_1_host :worker_1_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -88,15 +81,6 @@ CREATE OPERATOR = ( -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 @@ -113,14 +97,7 @@ FUNCTION 1 test_udt_hash(test_udt); \c - - :public_worker_2_host :worker_2_port -- START type creation --- ... as well as a function to use as its comparator... -CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean -AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - --- ... use that function to create a custom equality operator... +-- Use function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, @@ -132,15 +109,6 @@ CREATE OPERATOR = ( -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; --- ... create a test HASH function. 
Though it is a poor hash function, --- it is acceptable for our tests -CREATE FUNCTION test_udt_hash(test_udt) RETURNS int -AS 'SELECT hashtext( ($1.i + $1.i2)::text);' -LANGUAGE SQL -IMMUTABLE -RETURNS NULL ON NULL INPUT; - - -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 diff --git a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql index ed432b797..6708c0ae1 100644 --- a/src/test/regress/sql/multi_repartitioned_subquery_udf.sql +++ b/src/test/regress/sql/multi_repartitioned_subquery_udf.sql @@ -18,32 +18,6 @@ LANGUAGE sql IMMUTABLE AS $_$ OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -\c - - :public_worker_1_host :worker_1_port -DROP FUNCTION IF EXISTS median(double precision[]); - -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; - -\c - - :public_worker_2_host :worker_2_port -DROP FUNCTION IF EXISTS median(double precision[]); - -CREATE FUNCTION median(double precision[]) RETURNS double precision -LANGUAGE sql IMMUTABLE AS $_$ - SELECT AVG(val) FROM - (SELECT val FROM unnest($1) val - ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) - OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; -$_$; - --- Run query on master -\c - - :master_host :master_port - - SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; diff --git a/src/test/regress/sql/multi_router_planner_fast_path.sql b/src/test/regress/sql/multi_router_planner_fast_path.sql index 1bf416826..01c9035d5 100644 --- a/src/test/regress/sql/multi_router_planner_fast_path.sql +++ b/src/test/regress/sql/multi_router_planner_fast_path.sql @@ -531,6 
+531,7 @@ SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) WHERE author_id = 1 and 1=0; -- create a dummy function to be used in filtering +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION someDummyFunction(regclass) RETURNS text AS $$ @@ -554,6 +555,7 @@ BEGIN END IF; END; $$LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; SET client_min_messages TO ERROR; \set VERBOSITY terse diff --git a/src/test/regress/sql/multi_schema_support.sql b/src/test/regress/sql/multi_schema_support.sql index fdb922102..02ca41d01 100644 --- a/src/test/regress/sql/multi_schema_support.sql +++ b/src/test/regress/sql/multi_schema_support.sql @@ -203,34 +203,6 @@ END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 -\c - - - :worker_1_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - --- create UDF in worker node 2 -\c - - - :worker_2_port -CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -\c - - - :master_port - -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; @@ -251,36 +223,6 @@ END; $$ LANGUAGE 'plpgsql' IMMUTABLE; --- create UDF in worker node 1 in schema -\c - - - :worker_1_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; -BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - --- create UDF in worker node 2 in schema -\c - - - :worker_2_port -SET search_path TO test_schema_support; -CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) - RETURNS text AS -$$ -DECLARE - strresult text; 
-BEGIN - RETURN theValue * 3 / 2 + 1; -END; -$$ -LANGUAGE 'plpgsql' IMMUTABLE; - -\c - - - :master_port - -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; diff --git a/src/test/regress/sql/multi_subquery_behavioral_analytics.sql b/src/test/regress/sql/multi_subquery_behavioral_analytics.sql index 3e8953ead..f7860ce58 100644 --- a/src/test/regress/sql/multi_subquery_behavioral_analytics.sql +++ b/src/test/regress/sql/multi_subquery_behavioral_analytics.sql @@ -1278,17 +1278,8 @@ ORDER BY LIMIT 10; -- this is one complex join query derived from a user's production query --- first declare the function on workers on master +-- declare the function on workers and master -- With array_index: -SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) - RETURNS INT AS $$ - SELECT i - FROM (SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) - WHERE $1 [i] = $2 - LIMIT 1; - $$ LANGUAGE sql') -ORDER BY 1,2; - CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i @@ -1431,8 +1422,6 @@ LIMIT 10; -- drop created functions -SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') -ORDER BY 1,2; DROP FUNCTION array_index(ANYARRAY, ANYELEMENT); -- a query with a constant subquery @@ -1629,16 +1618,6 @@ CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool IMMUTABLE RETURNS NULL ON NULL INPUT; -SELECT run_command_on_workers($f$ - -CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool - AS 'select $1 > $2;' - LANGUAGE SQL - IMMUTABLE - RETURNS NULL ON NULL INPUT; - -$f$); - -- we don't support joins via functions SELECT user_id, array_length(events_table, 1) FROM ( @@ -1869,11 +1848,5 @@ LIMIT 1; DROP FUNCTION test_join_function_2(integer, integer); -SELECT 
run_command_on_workers($f$ - - DROP FUNCTION test_join_function_2(integer, integer); - -$f$); - SET citus.enable_router_execution TO TRUE; SET citus.subquery_pushdown to OFF; diff --git a/src/test/regress/sql/multi_test_helpers_superuser.sql b/src/test/regress/sql/multi_test_helpers_superuser.sql index 2df7b1724..85900ee00 100644 --- a/src/test/regress/sql/multi_test_helpers_superuser.sql +++ b/src/test/regress/sql/multi_test_helpers_superuser.sql @@ -37,6 +37,7 @@ $$; -- partition_task_list_results tests the internal PartitionTasklistResults function +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix text, query text, target_table regclass, @@ -49,7 +50,6 @@ CREATE OR REPLACE FUNCTION pg_catalog.partition_task_list_results(resultIdPrefix LANGUAGE C STRICT VOLATILE AS 'citus', $$partition_task_list_results$$; - -- get size of TopTransactionContext CREATE OR REPLACE FUNCTION top_transaction_context_size() RETURNS BIGINT LANGUAGE C STRICT VOLATILE @@ -65,3 +65,4 @@ BEGIN PERFORM public.wait_until_metadata_sync(30000); END; $function$; +RESET citus.enable_metadata_sync; diff --git a/src/test/regress/sql/multi_transactional_drop_shards.sql b/src/test/regress/sql/multi_transactional_drop_shards.sql index f88839d77..25c69031e 100644 --- a/src/test/regress/sql/multi_transactional_drop_shards.sql +++ b/src/test/regress/sql/multi_transactional_drop_shards.sql @@ -168,11 +168,13 @@ SET citus.override_table_visibility TO false; -- test DROP table with failing worker +SET citus.enable_metadata_sync TO OFF; CREATE FUNCTION fail_drop_table() RETURNS event_trigger AS $fdt$ BEGIN RAISE 'illegal value'; END; $fdt$ LANGUAGE plpgsql; +RESET citus.enable_metadata_sync; CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table(); diff --git a/src/test/regress/sql/multi_unsupported_worker_operations.sql b/src/test/regress/sql/multi_unsupported_worker_operations.sql index 02e5397cc..5fac64e3d 
100644 --- a/src/test/regress/sql/multi_unsupported_worker_operations.sql +++ b/src/test/regress/sql/multi_unsupported_worker_operations.sql @@ -51,6 +51,7 @@ SELECT * FROM mx_table ORDER BY col_1; \c - - - :worker_1_port -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -68,6 +69,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE mx_table_worker(col_1 text); diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index 1e625f5cc..bcfd12e5b 100644 --- a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -701,3 +701,46 @@ SELECT * FROM nested_obj_update ORDER BY 1,2,3; set client_min_messages to error; drop schema pg14 cascade; + +create schema pg14; +set search_path to pg14; + +select 1 from citus_add_node('localhost',:master_port,groupid=>0); + +-- test adding foreign table to metadata with the guc +-- will test truncating foreign tables later +CREATE TABLE foreign_table_test (id integer NOT NULL, data text, a bigserial); +INSERT INTO foreign_table_test VALUES (1, 'text_test'); +SELECT citus_add_local_table_to_metadata('foreign_table_test'); +CREATE EXTENSION postgres_fdw; +CREATE SERVER foreign_server + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'localhost', port :'master_port', dbname 'regression'); +CREATE USER MAPPING FOR CURRENT_USER + SERVER foreign_server + OPTIONS (user 'postgres'); +CREATE FOREIGN TABLE foreign_table ( + id integer NOT NULL, + data text, + a bigserial +) + SERVER foreign_server + OPTIONS (schema_name 'pg14', table_name 'foreign_table_test'); +SELECT citus_add_local_table_to_metadata('foreign_table'); + +SELECT count(*) FROM 
foreign_table; +TRUNCATE foreign_table; +\c - - - :worker_1_port +set search_path to pg14; +-- verify the foreign table is truncated +SELECT count(*) FROM pg14.foreign_table; + +-- should error out +TRUNCATE foreign_table; +\c - - - :master_port +-- cleanup +set client_min_messages to error; +drop extension postgres_fdw cascade; +drop schema pg14 cascade; +reset client_min_messages; +select 1 from citus_remove_node('localhost',:master_port); diff --git a/src/test/regress/sql/propagate_extension_commands.sql b/src/test/regress/sql/propagate_extension_commands.sql index 6356d71e3..3640e2f17 100644 --- a/src/test/regress/sql/propagate_extension_commands.sql +++ b/src/test/regress/sql/propagate_extension_commands.sql @@ -116,13 +116,6 @@ CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); CREATE TEXT SEARCH DICTIONARY intdict (TEMPLATE = intdict_template); COMMENT ON TEXT SEARCH DICTIONARY intdict IS 'dictionary for integers'; -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_init(internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); - -SELECT run_command_on_workers($$ -CREATE FUNCTION dintdict_lexize(internal, internal, internal, internal) RETURNS internal AS 'dict_int.so' LANGUAGE C STRICT; -$$); SELECT run_command_on_workers($$ CREATE TEXT SEARCH TEMPLATE intdict_template (LEXIZE = dintdict_lexize, INIT = dintdict_init ); diff --git a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql index 0587ebfaa..a3a5c94c8 100644 --- a/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql +++ b/src/test/regress/sql/replicate_reference_tables_to_coordinator.sql @@ -32,12 +32,13 @@ CREATE TABLE numbers(a int); SELECT create_reference_table('numbers'); INSERT INTO numbers VALUES (20), (21); +SET citus.enable_metadata_sync TO OFF; CREATE 
OR REPLACE FUNCTION my_volatile_fn() RETURNS INT AS $$ BEGIN RETURN 1; END; $$ language plpgsql VOLATILE; - +RESET citus.enable_metadata_sync; -- INSERT ... SELECT between reference tables BEGIN; diff --git a/src/test/regress/sql/shard_move_deferred_delete.sql b/src/test/regress/sql/shard_move_deferred_delete.sql index a052590d5..9dc4f82e3 100644 --- a/src/test/regress/sql/shard_move_deferred_delete.sql +++ b/src/test/regress/sql/shard_move_deferred_delete.sql @@ -80,17 +80,39 @@ $cmd$); SELECT master_move_shard_placement(20000000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 20 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 20 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :master_port + +SET search_path TO 
shard_move_deferred_delete; SELECT citus_shard_cost_by_disk_size(20000001); @@ -108,16 +130,39 @@ SELECT run_command_on_workers($cmd$ SELECT count(*) FROM pg_class WHERE relname = 't1_20000000'; $cmd$); -SELECT run_command_on_workers($cmd$ - -- override the function for testing purpose - create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) - as $BODY$ - begin - select 8300 into available_disk_size; - select 8500 into total_disk_size; - end - $BODY$ language plpgsql; -$cmd$); +-- override the function for testing purpose +-- since it is extension owned function, propagate it to workers manually +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :worker_1_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :worker_2_port +SET citus.enable_metadata_sync TO OFF; +create or replace function pg_catalog.citus_local_disk_space_stats(OUT available_disk_size bigint, OUT total_disk_size bigint) +as $BODY$ +begin + select 8300 into available_disk_size; + select 8500 into total_disk_size; +end +$BODY$ language plpgsql; + +\c - - - :master_port + +SET search_path TO shard_move_deferred_delete; -- When there would not be enough free space left after the move, the move should fail SELECT master_move_shard_placement(20000001, 'localhost', :worker_2_port, 'localhost', :worker_1_port); diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index c9bad1983..8a9ae16d9 100644 --- 
a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -137,6 +137,7 @@ AS 'citus' LANGUAGE C STRICT VOLATILE; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -154,6 +155,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; SET citus.next_shard_id TO 123000; diff --git a/src/test/regress/sql/text_search.sql b/src/test/regress/sql/text_search.sql new file mode 100644 index 000000000..916644fd6 --- /dev/null +++ b/src/test/regress/sql/text_search.sql @@ -0,0 +1,263 @@ +CREATE SCHEMA text_search; +CREATE SCHEMA text_search2; +SET search_path TO text_search; + +-- create a new configruation from scratch +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); + +DROP TABLE t1; +DROP TEXT SEARCH CONFIGURATION my_text_search_config; + +-- try to create table and index in 1 transaction +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); +ABORT; + +-- try again, should not fail with my_text_search_config being retained on the worker +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION 
my_text_search_config IS 'on demand propagation of text search object with a comment'; +CREATE TABLE t1(id int, name text); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t1', 'name'); +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config'::regconfig); +$$) ORDER BY 1,2; + +-- verify that changing anything on a managed TEXT SEARCH CONFIGURATION fails after parallel execution +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config IS 'this comment can''t be set right now'; +ABORT; + +-- create an index on an already distributed table +BEGIN; +CREATE TEXT SEARCH CONFIGURATION my_text_search_config2 ( parser = default ); +COMMENT ON TEXT SEARCH CONFIGURATION my_text_search_config2 IS 'on demand propagation of text search object with a comment 2'; +CREATE TABLE t1(id int, name text); +SELECT create_distributed_table('t1', 'name'); +CREATE INDEX t1_search_name ON t1 USING gin (to_tsvector('text_search.my_text_search_config2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.my_text_search_config2'::regconfig); +$$) ORDER BY 1,2; +ABORT; + +-- should be able to create a configuration based on a copy of an existing configuration +CREATE TEXT SEARCH CONFIGURATION french_noaccent ( COPY = french ); +CREATE TABLE t2(id int, name text); +CREATE INDEX t2_search_name ON t2 USING gin (to_tsvector('text_search.french_noaccent'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t2', 'id'); + +-- spot check that french_noaccent copied settings from french +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; +-- makes no 
sense, however we expect that the dictionary for the first token changes accordingly +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciiword WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'comment tu t''appelle') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; +-- do the same but we will replace all french dictionaries +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING REPLACE french_stem WITH dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +-- once more but now back via yet a different DDL command +ALTER TEXT SEARCH CONFIGURATION french_noaccent ALTER MAPPING FOR asciihword REPLACE dutch_stem WITH french_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +-- drop a mapping +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING FOR asciihword; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionary) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; +-- also with exists, doesn't change anything, but should not error +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciihword; + +-- Comment on a text search configuration +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS 'a text configuration that is butcherd to test all edge cases'; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) 
ORDER BY 1,2; + +-- Remove a comment +COMMENT ON TEXT SEARCH CONFIGURATION french_noaccent IS NULL; +SELECT * FROM run_command_on_workers($$ + SELECT obj_description('text_search.french_noaccent'::regconfig); +$$) ORDER BY 1,2; + +-- verify adding 2 dictionaries for two tokes at once +ALTER TEXT SEARCH CONFIGURATION french_noaccent DROP MAPPING IF EXISTS FOR asciiword, asciihword; +ALTER TEXT SEARCH CONFIGURATION french_noaccent ADD MAPPING FOR asciiword, asciihword WITH french_stem, dutch_stem; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciiword' LIMIT 1; +$$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ + SELECT ROW(alias,dictionaries) FROM ts_debug('text_search.french_noaccent', 'un chou-fleur') WHERE alias = 'asciihword' LIMIT 1; +$$) ORDER BY 1,2; + +--verify we can drop cascade a configuration that is in use +-- verify it is in use +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent; +-- drop cascade +DROP TEXT SEARCH CONFIGURATION text_search.french_noaccent CASCADE; +-- verify the configuration is dropped from the workers +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.french_noaccent'::regconfig; $$) ORDER BY 1,2; + +SET client_min_messages TO 'warning'; +SELECT * FROM run_command_on_workers($$CREATE ROLE text_search_owner;$$) ORDER BY 1,2; +CREATE ROLE text_search_owner; +RESET client_min_messages; + +CREATE TEXT SEARCH CONFIGURATION changed_owner ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION changed_owner OWNER TO text_search_owner; +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner'::regconfig; +$$) ORDER BY 1,2; + +-- redo test with propagating object after it was created 
and changed of owner +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION changed_owner2 ( PARSER = default ); +ALTER TEXT SEARCH CONFIGURATION changed_owner2 OWNER TO text_search_owner; +RESET citus.enable_ddl_propagation; +-- verify object doesn't exist before propagating +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.changed_owner2'::regconfig; $$) ORDER BY 1,2; + +-- distribute configuration +CREATE TABLE t3(id int, name text); +CREATE INDEX t3_search_name ON t3 USING gin (to_tsvector('text_search.changed_owner2'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t3', 'name'); + +-- verify config owner +SELECT * FROM run_command_on_workers($$ + SELECT cfgowner::regrole + FROM pg_ts_config + WHERE oid = 'text_search.changed_owner2'::regconfig; +$$) ORDER BY 1,2; + + +-- rename tests +CREATE TEXT SEARCH CONFIGURATION change_name ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_name'::regconfig; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION change_name RENAME TO changed_name; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.changed_name'::regconfig; +$$) ORDER BY 1,2; + +-- test move of schema +CREATE TEXT SEARCH CONFIGURATION change_schema ( PARSER = default ); +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search.change_schema'::regconfig; +$$) ORDER BY 1,2; +ALTER TEXT SEARCH CONFIGURATION change_schema SET SCHEMA text_search2; +SELECT * FROM run_command_on_workers($$ -- verify the name exists on the worker + SELECT 'text_search2.change_schema'::regconfig; +$$) ORDER BY 1,2; + +-- verify we get an error that the configuration change_schema is not found, even though the object address will be +-- found in its new schema, and is distributed +ALTER TEXT SEARCH CONFIGURATION change_schema SET 
SCHEMA text_search2; +-- should tell us that text_search.does_not_exist does not exist, covers a complex edgecase +-- in resolving the object address +ALTER TEXT SEARCH CONFIGURATION text_search.does_not_exist SET SCHEMA text_search2; + + +-- verify edgecases in deparsers +CREATE TEXT SEARCH CONFIGURATION config1 ( PARSER = default ); +CREATE TEXT SEARCH CONFIGURATION config2 ( PARSER = default ); +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION config3 ( PARSER = default ); +RESET citus.enable_ddl_propagation; + +-- verify config1, config2 exist on workers, config3 not +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; + +-- DROP all config's, only 1&2 are distributed, they should propagate well to remotes +DROP TEXT SEARCH CONFIGURATION config1, config2, config3; + +-- verify all existing ones have been removed (checking config3 for consistency) +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2; +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2; +-- verify they are all removed locally +SELECT 'text_search.config1'::regconfig; +SELECT 'text_search.config2'::regconfig; +SELECT 'text_search.config3'::regconfig; + +-- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object +SET citus.enable_ddl_propagation TO off; +CREATE TEXT SEARCH CONFIGURATION concurrent_index_config ( PARSER = default ); +RESET citus.enable_ddl_propagation; + +-- verify it doesn't exist on the workers +SELECT * FROM run_command_on_workers($$ SELECT 
'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + +-- create distributed table that then concurrently would have an index created. +CREATE TABLE t4(id int, name text); +SELECT create_distributed_table('t4', 'name'); +CREATE INDEX CONCURRENTLY t4_search_name ON t4 USING gin (to_tsvector('text_search.concurrent_index_config'::regconfig, (COALESCE(name, ''::character varying))::text)); + +-- now the configuration should be on the worker, and the above index creation shouldn't have failed. +SELECT * FROM run_command_on_workers($$ SELECT 'text_search.concurrent_index_config'::regconfig; $$) ORDER BY 1,2; + +-- verify the objid is correctly committed locally due to the somewhat convoluted commit and new transaction starting when creating an index concurrently +SELECT pg_catalog.pg_identify_object_as_address(classid, objid, objsubid) + FROM citus.pg_dist_object + WHERE classid = 3602 AND objid = 'text_search.concurrent_index_config'::regconfig::oid; + +-- verify old text search configurations get renamed if they are not the same as the newly propagated configuration. +-- We do this by creating configurations on the workers as a copy from a different existing catalog. 
+SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = dutch ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = french ); + +-- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; +$$) ORDER BY 1,2; + +-- verify the objects get reused appropriately when the specification is the same +SELECT * FROM run_command_on_workers($$ + set citus.enable_metadata_sync TO off; + CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); + reset citus.enable_metadata_sync; +$$) ORDER BY 1,2; +CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = french ); + +-- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the +-- original one is reused +SELECT * FROM run_command_on_workers($$ + SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; +$$) ORDER BY 1,2; + +CREATE SCHEMA "Text Search Requiring Quote's"; +CREATE TEXT SEARCH CONFIGURATION "Text Search Requiring Quote's"."Quoted Config Name" ( parser = default ); +CREATE TABLE t5(id int, name text); +CREATE INDEX t5_search_name ON t5 USING gin (to_tsvector('"Text Search Requiring Quote''s"."Quoted Config Name"'::regconfig, (COALESCE(name, ''::character varying))::text)); +SELECT create_distributed_table('t5', 'name'); + +SET client_min_messages TO 'warning'; +DROP SCHEMA text_search, text_search2, "Text Search Requiring Quote's" CASCADE; +DROP ROLE text_search_owner; diff --git a/src/test/regress/sql/upgrade_basic_before.sql b/src/test/regress/sql/upgrade_basic_before.sql index 3b236cca0..7d518e510 100644 --- 
a/src/test/regress/sql/upgrade_basic_before.sql +++ b/src/test/regress/sql/upgrade_basic_before.sql @@ -34,6 +34,7 @@ CREATE TABLE tr(pk int, a int REFERENCES r(a) ON DELETE CASCADE ON UPDATE CASCAD SELECT create_distributed_table('tr', 'pk'); INSERT INTO tr SELECT c, c FROM generate_series(1, 5) as c; -- this function is dropped in Citus10, added here for tests +SET citus.enable_metadata_sync TO OFF; CREATE OR REPLACE FUNCTION pg_catalog.master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) @@ -50,6 +51,7 @@ CREATE OR REPLACE FUNCTION pg_catalog.master_create_worker_shards(table_name tex RETURNS void AS 'citus', $$master_create_worker_shards$$ LANGUAGE C STRICT; +RESET citus.enable_metadata_sync; CREATE TABLE t_range(id int, value_1 int); SELECT create_distributed_table('t_range', 'id', 'range'); SELECT master_create_empty_shard('t_range') as shardid1 \gset